vmwgfx_cotable.c

/**************************************************************************
 *
 * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Treat context OTables as resources to make use of the resource
 * backing MOB eviction mechanism, which is used to read back the COTable
 * whenever the backing MOB is evicted.
 */

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "vmwgfx_so.h"

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Seen entries in command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	size_t size_read_back;
	int seen_entries;
	u32 type;
	bool scrubbed;
	struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Min number of initial entries at cotable allocation
 * for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: Function to call on cotable unbind.
 */
struct vmw_cotable_info {
	u32 min_initial_entries;
	u32 size;
	void (*unbind_func)(struct vmw_private *, struct list_head *,
			    bool);
};
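
/*
 * The entries below must be kept in SVGACOTableType order, since the
 * table is indexed directly by cotable type.
 */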
static const struct vmw_cotable_info co_info[] = {
	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
	{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
};

/*
 * Cotables with bindings that we remove must be scrubbed first;
 * otherwise, the device will swap in an invalid context when we remove
 * bindings before scrubbing a cotable.
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
	SVGA_COTABLE_RTVIEW,
	SVGA_COTABLE_DSVIEW,
	SVGA_COTABLE_SRVIEW,
	SVGA_COTABLE_DXSHADER,
	SVGA_COTABLE_ELEMENTLAYOUT,
	SVGA_COTABLE_BLENDSTATE,
	SVGA_COTABLE_DEPTHSTENCIL,
	SVGA_COTABLE_RASTERIZERSTATE,
	SVGA_COTABLE_SAMPLER,
	SVGA_COTABLE_STREAMOUTPUT,
	SVGA_COTABLE_DXQUERY,
};

static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);
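
/*
 * Cotables are always MOB-backed (needs_backup) and take part in the
 * normal resource eviction scheme (may_evict), which is what triggers
 * the readback on MOB eviction mentioned at the top of this file.
 */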
static const struct vmw_res_func vmw_cotable_func = {
	.res_type = vmw_res_cotable,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "context guest backed object tables",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_cotable_create,
	.destroy = vmw_cotable_destroy,
	.bind = vmw_cotable_bind,
	.unbind = vmw_cotable_unbind,
};

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
	return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
	res->id = -1;
	return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is functionally identical to the bind() callback, apart from the
 * function signature.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = &res->backup->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd;

	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
	lockdep_assert_held(&bo->resv->lock.base);

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
	if (!cmd) {
		DRM_ERROR("Failed reserving FIFO space for cotable "
			  "binding.\n");
		return -ENOMEM;
	}

	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
	WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
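
	/*
	 * Rebind the MOB to the context/cotable-type pair.
	 * validSizeInBytes tells the device how much of the MOB already
	 * holds valid, previously read-back entries.
	 */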
	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = vcotbl->ctx->id;
	cmd->body.type = vcotbl->type;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validSizeInBytes = vcotbl->size_read_back;

	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
	vcotbl->scrubbed = false;

	return 0;
}

/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf)
{
	/*
	 * The create() callback may have changed @res->backup without
	 * the caller noticing, and with val_buf->bo still pointing to
	 * the old backup buffer. Although hackish, and not used currently,
	 * take the opportunity to correct the value here so that it's not
	 * misused in the future.
	 */
	val_buf->bo = &res->backup->base;

	return vmw_cotable_unscrub(res);
}

/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the
 * backup buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with
 * the following restrictions:
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	size_t submit_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd0;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd1;

	if (vcotbl->scrubbed)
		return 0;

	if (co_info[vcotbl->type].unbind_func)
		co_info[vcotbl->type].unbind_func(dev_priv,
						  &vcotbl->resource_list,
						  readback);

	submit_size = sizeof(*cmd1);
	if (readback)
		submit_size += sizeof(*cmd0);

	cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
	if (!cmd1) {
		DRM_ERROR("Failed reserving FIFO space for cotable "
			  "unbinding.\n");
		return -ENOMEM;
	}

	vcotbl->size_read_back = 0;
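
	/*
	 * Emit an optional READBACK_COTABLE command first, then a
	 * SET_COTABLE command with SVGA3D_INVALID_ID to detach the MOB
	 * from the context.
	 */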
	if (readback) {
		cmd0 = (void *) cmd1;
		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd0->header.size = sizeof(cmd0->body);
		cmd0->body.cid = vcotbl->ctx->id;
		cmd0->body.type = vcotbl->type;
		cmd1 = (void *) &cmd0[1];
		vcotbl->size_read_back = res->backup_size;
	}

	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.cid = vcotbl->ctx->id;
	cmd1->body.type = vcotbl->type;
	cmd1->body.mobid = SVGA3D_INVALID_ID;
	cmd1->body.validSizeInBytes = 0;

	vmw_fifo_commit_flush(dev_priv, submit_size);
	vcotbl->scrubbed = true;

	/* Trigger a create() on next validate. */
	res->id = -1;

	return 0;
}

/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
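
	/* Nothing to do if the cotable isn't currently backed by a MOB. */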
	if (list_empty(&res->mob_head))
		return 0;

	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
	lockdep_assert_held(&bo->resv->lock.base);

	mutex_lock(&dev_priv->binding_mutex);
	if (!vcotbl->scrubbed)
		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
	mutex_unlock(&dev_priv->binding_mutex);

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_fence_single_bo(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_readback - Read back a cotable without unbinding.
 *
 * @res: The cotable resource.
 *
 * Reads back a cotable to its backing mob without scrubbing the MOB from
 * the cotable. The MOB is fenced for subsequent CPU access.
 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd;
	struct vmw_fence_obj *fence;
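
	/* Only issue a readback command if the cotable is still bound. */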
	if (!vcotbl->scrubbed) {
		cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
					  SVGA3D_INVALID_ID);
		if (!cmd) {
			DRM_ERROR("Failed reserving FIFO space for cotable "
				  "readback.\n");
			return -ENOMEM;
		}
		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.cid = vcotbl->ctx->id;
		cmd->body.type = vcotbl->type;
		vcotbl->size_read_back = res->backup_size;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_fence_single_bo(&res->backup->base, fence);
	vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function may not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_dma_buffer *buf, *old_buf = res->backup;
	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
	size_t old_size = res->backup_size;
	size_t old_size_read_back = vcotbl->size_read_back;
	size_t cur_size_read_back;
	struct ttm_bo_kmap_obj old_map, new_map;
	int ret;
	size_t i;

	ret = vmw_cotable_readback(res);
	if (ret)
		return ret;

	cur_size_read_back = vcotbl->size_read_back;
	vcotbl->size_read_back = old_size_read_back;
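
	/*
	 * The readback may have updated size_read_back. Remember the new
	 * value for the new MOB, but restore the old one so that failure
	 * paths below leave the resource state untouched.
	 */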

	/*
	 * While the device is processing, allocate and reserve a buffer
	 * object for the new COTable. Initially pin the buffer object to
	 * make sure we can use tryreserve without failure.
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
			      true, vmw_dmabuf_bo_free);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		return ret;
	}

	bo = &buf->base;
	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));

	ret = ttm_bo_wait(old_bo, false, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed waiting for cotable unbind.\n");
		goto out_wait;
	}

	/*
	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
	 * This should really be a TTM utility.
	 */
	for (i = 0; i < old_bo->num_pages; ++i) {
		bool dummy;

		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping old COTable on resize.\n");
			goto out_wait;
		}
		ret = ttm_bo_kmap(bo, i, 1, &new_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping new COTable on resize.\n");
			goto out_map_new;
		}
		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
		       ttm_kmap_obj_virtual(&old_map, &dummy),
		       PAGE_SIZE);
		ttm_bo_kunmap(&new_map);
		ttm_bo_kunmap(&old_map);
	}

	/* Unpin new buffer, and switch backup buffers. */
	ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed validating new COTable backup buffer.\n");
		goto out_wait;
	}

	res->backup = buf;
	res->backup_size = new_size;
	vcotbl->size_read_back = cur_size_read_back;

	/*
	 * Now tell the device to switch. If this fails, then we need to
	 * revert the full resize.
	 */
	ret = vmw_cotable_unscrub(res);
	if (ret) {
		DRM_ERROR("Failed switching COTable backup buffer.\n");
		res->backup = old_buf;
		res->backup_size = old_size;
		vcotbl->size_read_back = old_size_read_back;
		goto out_wait;
	}

	/* Let go of the old mob. */
	list_del(&res->mob_head);
	list_add_tail(&res->mob_head, &buf->res_list);
	vmw_dmabuf_unreference(&old_buf);
	res->id = vcotbl->type;

	return 0;

out_map_new:
	ttm_bo_kunmap(&old_map);
out_wait:
	ttm_bo_unreserve(bo);
	vmw_dmabuf_unreference(&buf);

	return ret;
}

/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for
 * two things:
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer, that is, if @res->mob_head is non-empty.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	size_t new_size = res->backup_size;
	size_t needed_size;
	int ret;

	/* Check whether we need to resize the cotable */
	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
	while (needed_size > new_size)
		new_size *= 2;

	if (likely(new_size <= res->backup_size)) {
		if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
		}
		res->id = vcotbl->type;
		return 0;
	}

	return vmw_cotable_resize(res, new_size);
}

/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final (part of resource destruction) destroy callback.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
	(void) vmw_cotable_destroy(res);
}

static size_t cotable_acc_size;

/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(res);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource. The cotable resource will not
 * add a refcount.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
				       struct vmw_resource *ctx,
				       u32 type)
{
	struct vmw_cotable *vcotbl;
	int ret;
	u32 num_entries;

	if (unlikely(cotable_acc_size == 0))
		cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   cotable_acc_size, false, true);
	if (unlikely(ret))
		return ERR_PTR(ret);

	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
	if (unlikely(vcotbl == NULL)) {
		ret = -ENOMEM;
		goto out_no_alloc;
	}

	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
				vmw_cotable_free, &vmw_cotable_func);
	if (unlikely(ret != 0))
		goto out_no_init;

	INIT_LIST_HEAD(&vcotbl->resource_list);
	vcotbl->res.id = type;
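
	/*
	 * Start with a single page, unless the minimum number of initial
	 * entries for this type doesn't fit in one; in that case round
	 * the needed size up to a whole page.
	 */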
	vcotbl->res.backup_size = PAGE_SIZE;
	num_entries = PAGE_SIZE / co_info[type].size;
	if (num_entries < co_info[type].min_initial_entries) {
		vcotbl->res.backup_size = co_info[type].min_initial_entries *
			co_info[type].size;
		vcotbl->res.backup_size =
			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	}
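
	/* Start out scrubbed: no MOB is bound until the first validation. */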
	vcotbl->scrubbed = true;
	vcotbl->seen_entries = -1;
	vcotbl->type = type;
	vcotbl->ctx = ctx;

	vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);

	return &vcotbl->res;

out_no_init:
	kfree(vcotbl);
out_no_alloc:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
	return ERR_PTR(ret);
}

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);

	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
			  (unsigned) vcotbl->type, id);
		return -EINVAL;
	}
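
	/*
	 * seen_entries is a high-water mark; raising it invalidates the
	 * resource id so that the next validation resizes the cotable.
	 */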
	if (vcotbl->seen_entries < id) {
		/* Trigger a call to create() on next validate */
		res->id = -1;
		vcotbl->seen_entries = id;
	}

	return 0;
}

/**
 * vmw_cotable_add_resource - add a resource to the cotable's list of
 * active resources.
 *
 * @res: pointer to a struct vmw_resource representing the cotable.
 * @head: pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
	struct vmw_cotable *vcotbl =
		container_of(res, struct vmw_cotable, res);

	list_add_tail(head, &vcotbl->resource_list);
}