vmwgfx_overlay.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619
  1. /**************************************************************************
  2. *
  3. * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. #include <drm/drmP.h>
  28. #include "vmwgfx_drv.h"
  29. #include <drm/ttm/ttm_placement.h>
  30. #include "device_include/svga_overlay.h"
  31. #include "device_include/svga_escape.h"
/* Number of simultaneous overlay streams this driver exposes. */
#define VMW_MAX_NUM_STREAMS 1
/* Overlays require both the FIFO video commands and the escape mechanism. */
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream {
	struct vmw_dma_buffer *buf;	/* backing buffer; NULL when fully stopped */
	bool claimed;			/* handed out via vmw_overlay_claim() */
	bool paused;			/* stopped on hw, but buf/saved kept for resume */
	struct drm_vmw_control_stream_arg saved;	/* args of last successful put */
};
/**
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;	/* protects all stream state below */
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};
  50. static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
  51. {
  52. struct vmw_private *dev_priv = vmw_priv(dev);
  53. return dev_priv ? dev_priv->overlay_priv : NULL;
  54. }
/* FIFO escape command header: the escape opcode followed by its body. */
struct vmw_escape_header {
	uint32_t cmd;		/* SVGA_CMD_ESCAPE */
	SVGAFifoCmdEscape body;	/* namespace id + payload size */
};
/* A complete video-flush escape command as it is laid out in the FIFO. */
struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};
  63. static inline void fill_escape(struct vmw_escape_header *header,
  64. uint32_t size)
  65. {
  66. header->cmd = SVGA_CMD_ESCAPE;
  67. header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
  68. header->body.size = size;
  69. }
  70. static inline void fill_flush(struct vmw_escape_video_flush *cmd,
  71. uint32_t stream_id)
  72. {
  73. fill_escape(&cmd->escape, sizeof(cmd->flush));
  74. cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
  75. cmd->flush.streamId = stream_id;
  76. }
/**
 * Send put command to hw.
 *
 * Reserves one FIFO chunk containing a VIDEO_SET_REGS escape (header plus
 * one registerId/value pair per video register) followed by a video flush.
 *
 * Returns 0 on success, or -ENOMEM if the FIFO reservation fails (which
 * means the hardware has hung). @interruptible is accepted for interface
 * symmetry with vmw_overlay_send_stop() but is not used here.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct vmw_escape_video_flush *flush;
	size_t fifo_size;
	bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
	int i, num_items;
	SVGAGuestPtr ptr;

	struct {
		struct vmw_escape_header escape;
		struct {
			uint32_t cmdType;
			uint32_t streamId;
		} header;
	} *cmds;
	struct {
		uint32_t registerId;
		uint32_t value;
	} *items;

	/* The SVGA_VIDEO_* defines are indices, so we need index + 1 items. */
	if (have_so)
		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
	else
		num_items = SVGA_VIDEO_PITCH_3 + 1;

	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;

	cmds = vmw_fifo_reserve(dev_priv, fifo_size);
	/* hardware has hung, can't do anything here */
	if (!cmds)
		return -ENOMEM;

	/* The items array immediately follows the set-regs command, and the
	 * flush command follows the last item. */
	items = (typeof(items))&cmds[1];
	flush = (struct vmw_escape_video_flush *)&items[num_items];

	/* Escape payload size: the cmdType/streamId header plus all items
	 * (the header is two uint32s, i.e. the same size as one item). */
	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));
	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->header.streamId = arg->stream_id;

	/* the IDs are neatly numbered */
	for (i = 0; i < num_items; i++)
		items[i].registerId = i;

	vmw_bo_get_guest_ptr(&buf->base, &ptr);
	/* arg->offset is the offset of the frame within the buffer object. */
	ptr.offset += arg->offset;

	items[SVGA_VIDEO_ENABLED].value = true;
	items[SVGA_VIDEO_FLAGS].value = arg->flags;
	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
	items[SVGA_VIDEO_FORMAT].value = arg->format;
	items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
	items[SVGA_VIDEO_SIZE].value = arg->size;
	items[SVGA_VIDEO_WIDTH].value = arg->width;
	items[SVGA_VIDEO_HEIGHT].value = arg->height;
	items[SVGA_VIDEO_SRC_X].value = arg->src.x;
	items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
	items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
	items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
	items[SVGA_VIDEO_DST_X].value = arg->dst.x;
	items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
	items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
	items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
	items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
	items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
	items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
	if (have_so) {
		/* With screen objects the data may live in a GMR rather than
		 * vram, so the GMR id must be sent too. */
		items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId;
		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
	}

	fill_flush(flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, fifo_size);

	return 0;
}
  152. /**
  153. * Send stop command to hw.
  154. *
  155. * Returns
  156. * -ERESTARTSYS if interrupted by a signal.
  157. */
  158. static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
  159. uint32_t stream_id,
  160. bool interruptible)
  161. {
  162. struct {
  163. struct vmw_escape_header escape;
  164. SVGAEscapeVideoSetRegs body;
  165. struct vmw_escape_video_flush flush;
  166. } *cmds;
  167. int ret;
  168. for (;;) {
  169. cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
  170. if (cmds)
  171. break;
  172. ret = vmw_fallback_wait(dev_priv, false, true, 0,
  173. interruptible, 3*HZ);
  174. if (interruptible && ret == -ERESTARTSYS)
  175. return ret;
  176. else
  177. BUG_ON(ret != 0);
  178. }
  179. fill_escape(&cmds->escape, sizeof(cmds->body));
  180. cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
  181. cmds->body.header.streamId = stream_id;
  182. cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
  183. cmds->body.items[0].value = false;
  184. fill_flush(&cmds->flush, stream_id);
  185. vmw_fifo_commit(dev_priv, sizeof(*cmds));
  186. return 0;
  187. }
  188. /**
  189. * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
  190. *
  191. * With the introduction of screen objects buffers could now be
  192. * used with GMRs instead of being locked to vram.
  193. */
  194. static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
  195. struct vmw_dma_buffer *buf,
  196. bool pin, bool inter)
  197. {
  198. if (!pin)
  199. return vmw_dmabuf_unpin(dev_priv, buf, inter);
  200. if (dev_priv->active_display_unit == vmw_du_legacy)
  201. return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
  202. return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
  203. }
/**
 * Stop or pause a stream.
 *
 * If the stream is paused the no evict flag is removed from the buffer
 * but left in vram. This allows for instance mode_set to evict it
 * should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 *
 * Returns 0, or an error from vmw_overlay_send_stop() /
 * vmw_overlay_move_buffer() (-ERESTARTSYS when interruptible).
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* no buffer attached the stream is completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag so no -ENOMEM */
		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
					      interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		/* Full stop: drop our buffer reference and clear paused. */
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		/* Pause: keep buf and saved args so resume can restart us. */
		stream->paused = true;
	}

	return 0;
}
/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -EINVAL if @buf is NULL.
 * -ENOMEM if buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		/* New buffer: fully stop the old stream first so its buffer
		 * is unpinned and its reference dropped. */
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and not paused then just send
		 * the put command, no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
		       != 0);
		return ret;
	}

	/* Success: take our own reference on a new buffer and remember the
	 * arguments so a paused stream can be resumed later. */
	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;
	/* stream is no longer stopped/paused */
	stream->paused = false;

	return 0;
}
  307. /**
  308. * Stop all streams.
  309. *
  310. * Used by the fb code when starting.
  311. *
  312. * Takes the overlay lock.
  313. */
  314. int vmw_overlay_stop_all(struct vmw_private *dev_priv)
  315. {
  316. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  317. int i, ret;
  318. if (!overlay)
  319. return 0;
  320. mutex_lock(&overlay->mutex);
  321. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  322. struct vmw_stream *stream = &overlay->stream[i];
  323. if (!stream->buf)
  324. continue;
  325. ret = vmw_overlay_stop(dev_priv, i, false, false);
  326. WARN_ON(ret != 0);
  327. }
  328. mutex_unlock(&overlay->mutex);
  329. return 0;
  330. }
  331. /**
  332. * Try to resume all paused streams.
  333. *
  334. * Used by the kms code after moving a new scanout buffer to vram.
  335. *
  336. * Takes the overlay lock.
  337. */
  338. int vmw_overlay_resume_all(struct vmw_private *dev_priv)
  339. {
  340. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  341. int i, ret;
  342. if (!overlay)
  343. return 0;
  344. mutex_lock(&overlay->mutex);
  345. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  346. struct vmw_stream *stream = &overlay->stream[i];
  347. if (!stream->paused)
  348. continue;
  349. ret = vmw_overlay_update_stream(dev_priv, stream->buf,
  350. &stream->saved, false);
  351. if (ret != 0)
  352. DRM_INFO("%s: *warning* failed to resume stream %i\n",
  353. __func__, i);
  354. }
  355. mutex_unlock(&overlay->mutex);
  356. return 0;
  357. }
  358. /**
  359. * Pauses all active streams.
  360. *
  361. * Used by the kms code when moving a new scanout buffer to vram.
  362. *
  363. * Takes the overlay lock.
  364. */
  365. int vmw_overlay_pause_all(struct vmw_private *dev_priv)
  366. {
  367. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  368. int i, ret;
  369. if (!overlay)
  370. return 0;
  371. mutex_lock(&overlay->mutex);
  372. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  373. if (overlay->stream[i].paused)
  374. DRM_INFO("%s: *warning* stream %i already paused\n",
  375. __func__, i);
  376. ret = vmw_overlay_stop(dev_priv, i, true, false);
  377. WARN_ON(ret != 0);
  378. }
  379. mutex_unlock(&overlay->mutex);
  380. return 0;
  381. }
  382. static bool vmw_overlay_available(const struct vmw_private *dev_priv)
  383. {
  384. return (dev_priv->overlay_priv != NULL &&
  385. ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
  386. VMW_OVERLAY_CAP_MASK));
  387. }
/**
 * vmw_overlay_ioctl - userspace entry point for controlling a stream.
 *
 * Validates the stream, then either stops it (arg->enabled == 0) or looks
 * up the backing buffer and (re)starts it via vmw_overlay_update_stream().
 *
 * Returns 0 on success, -ENOSYS when overlays are unavailable on this
 * device, or an error from the lookup/stop/update helpers.
 */
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!vmw_overlay_available(dev_priv))
		return -ENOSYS;

	/* Takes a reference on the stream resource; released below. */
	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	/* update_stream takes its own reference if it keeps the buffer. */
	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}
  419. int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
  420. {
  421. if (!vmw_overlay_available(dev_priv))
  422. return 0;
  423. return VMW_MAX_NUM_STREAMS;
  424. }
  425. int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
  426. {
  427. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  428. int i, k;
  429. if (!vmw_overlay_available(dev_priv))
  430. return 0;
  431. mutex_lock(&overlay->mutex);
  432. for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
  433. if (!overlay->stream[i].claimed)
  434. k++;
  435. mutex_unlock(&overlay->mutex);
  436. return k;
  437. }
  438. int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
  439. {
  440. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  441. int i;
  442. if (!overlay)
  443. return -ENOSYS;
  444. mutex_lock(&overlay->mutex);
  445. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  446. if (overlay->stream[i].claimed)
  447. continue;
  448. overlay->stream[i].claimed = true;
  449. *out = i;
  450. mutex_unlock(&overlay->mutex);
  451. return 0;
  452. }
  453. mutex_unlock(&overlay->mutex);
  454. return -ESRCH;
  455. }
  456. int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
  457. {
  458. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  459. BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
  460. if (!overlay)
  461. return -ENOSYS;
  462. mutex_lock(&overlay->mutex);
  463. WARN_ON(!overlay->stream[stream_id].claimed);
  464. vmw_overlay_stop(dev_priv, stream_id, false, false);
  465. overlay->stream[stream_id].claimed = false;
  466. mutex_unlock(&overlay->mutex);
  467. return 0;
  468. }
  469. int vmw_overlay_init(struct vmw_private *dev_priv)
  470. {
  471. struct vmw_overlay *overlay;
  472. int i;
  473. if (dev_priv->overlay_priv)
  474. return -EINVAL;
  475. overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
  476. if (!overlay)
  477. return -ENOMEM;
  478. mutex_init(&overlay->mutex);
  479. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  480. overlay->stream[i].buf = NULL;
  481. overlay->stream[i].paused = false;
  482. overlay->stream[i].claimed = false;
  483. }
  484. dev_priv->overlay_priv = overlay;
  485. return 0;
  486. }
  487. int vmw_overlay_close(struct vmw_private *dev_priv)
  488. {
  489. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  490. bool forgotten_buffer = false;
  491. int i;
  492. if (!overlay)
  493. return -ENOSYS;
  494. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  495. if (overlay->stream[i].buf) {
  496. forgotten_buffer = true;
  497. vmw_overlay_stop(dev_priv, i, false, false);
  498. }
  499. }
  500. WARN_ON(forgotten_buffer);
  501. dev_priv->overlay_priv = NULL;
  502. kfree(overlay);
  503. return 0;
  504. }