/* msm_atomic.c */
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"

/* Tracks one in-flight atomic commit: the state being applied, the GPU
 * fence to wait on before touching the hardware, and which CRTCs this
 * commit has claimed.
 */
struct msm_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;	/* freed in complete_commit() */
	uint32_t fence;			/* max fence across all changing fbs */
	struct msm_fence_cb fence_cb;	/* fires complete_commit() on async path */
	uint32_t crtc_mask;		/* bitmask of CRTCs affected by this commit */
};

static void fence_cb(struct msm_fence_cb *cb);
  28. /* block until specified crtcs are no longer pending update, and
  29. * atomically mark them as pending update
  30. */
  31. static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
  32. {
  33. int ret;
  34. spin_lock(&priv->pending_crtcs_event.lock);
  35. ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
  36. !(priv->pending_crtcs & crtc_mask));
  37. if (ret == 0) {
  38. DBG("start: %08x", crtc_mask);
  39. priv->pending_crtcs |= crtc_mask;
  40. }
  41. spin_unlock(&priv->pending_crtcs_event.lock);
  42. return ret;
  43. }
/* clear specified crtcs (no longer pending update) and wake anyone
 * blocked in start_atomic() waiting on them
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}
  54. static struct msm_commit *commit_init(struct drm_atomic_state *state)
  55. {
  56. struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
  57. if (!c)
  58. return NULL;
  59. c->dev = state->dev;
  60. c->state = state;
  61. /* TODO we might need a way to indicate to run the cb on a
  62. * different wq so wait_for_vblanks() doesn't block retiring
  63. * bo's..
  64. */
  65. INIT_FENCE_CB(&c->fence_cb, fence_cb);
  66. return c;
  67. }
/* Tear down a commit that successfully claimed its crtcs: release the
 * crtcs (waking waiters) and free the bookkeeping.
 */
static void commit_destroy(struct msm_commit *c)
{
	end_atomic(c->dev->dev_private, c->crtc_mask);
	kfree(c);
}
  73. static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
  74. struct drm_atomic_state *old_state)
  75. {
  76. struct drm_crtc *crtc;
  77. struct msm_drm_private *priv = old_state->dev->dev_private;
  78. struct msm_kms *kms = priv->kms;
  79. int ncrtcs = old_state->dev->mode_config.num_crtc;
  80. int i;
  81. for (i = 0; i < ncrtcs; i++) {
  82. crtc = old_state->crtcs[i];
  83. if (!crtc)
  84. continue;
  85. if (!crtc->state->enable)
  86. continue;
  87. /* Legacy cursor ioctls are completely unsynced, and userspace
  88. * relies on that (by doing tons of cursor updates). */
  89. if (old_state->legacy_cursor_update)
  90. continue;
  91. if (drm_crtc_vblank_get(crtc))
  92. continue;
  93. kms->funcs->wait_for_crtc_commit_done(kms, crtc);
  94. drm_crtc_vblank_put(crtc);
  95. }
  96. }
/* The (potentially) asynchronous part of the commit. At this point
 * nothing can fail short of armageddon.
 *
 * Ordering is significant: hw prepare, then the standard atomic helper
 * disable/plane/enable sequence, then wait for the update to latch
 * before cleaning up the outgoing fbs and freeing the state/commit.
 */
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, false);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs. So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq. We need some way to poll for pipe shut
	 * down. Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */
	msm_atomic_wait_for_commit_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_state_free(state);

	/* releases the crtcs claimed by start_atomic() and frees c */
	commit_destroy(c);
}
  128. static void fence_cb(struct msm_fence_cb *cb)
  129. {
  130. struct msm_commit *c =
  131. container_of(cb, struct msm_commit, fence_cb);
  132. complete_commit(c);
  133. }
  134. static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
  135. {
  136. struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
  137. c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
  138. }
  139. int msm_atomic_check(struct drm_device *dev,
  140. struct drm_atomic_state *state)
  141. {
  142. int ret;
  143. /*
  144. * msm ->atomic_check can update ->mode_changed for pixel format
  145. * changes, hence must be run before we check the modeset changes.
  146. */
  147. ret = drm_atomic_helper_check_planes(dev, state);
  148. if (ret)
  149. return ret;
  150. ret = drm_atomic_helper_check_modeset(dev, state);
  151. if (ret)
  152. return ret;
  153. return ret;
  154. }
/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @async: asynchronous commit
 *
 * This function commits a with drm_atomic_helper_check() pre-validated state
 * object. This can still fail when e.g. the framebuffer reservation fails. For
 * now this doesn't implement asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool async)
{
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	ktime_t timeout;
	struct msm_commit *c;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
	 * Figure out what fence to wait for: only fbs that are actually
	 * changing contribute, so unchanged planes don't stall the commit.
	 */
	for (i = 0; i < nplanes; i++) {
		struct drm_plane *plane = state->planes[i];
		struct drm_plane_state *new_state = state->plane_states[i];
		if (!plane)
			continue;
		if ((plane->state->fb != new_state->fb) && new_state->fb)
			add_fb(c, new_state->fb);
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		/* we never claimed the crtcs, so plain kfree() rather
		 * than commit_destroy() (which would release them):
		 */
		kfree(c);
		goto error;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(dev, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one conditions: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	if (async) {
		/* complete_commit() runs from fence_cb when c->fence signals */
		msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
		return 0;
	}

	timeout = ktime_add_ms(ktime_get(), 1000);

	/* uninterruptible wait */
	msm_wait_fence(dev, c->fence, &timeout, false);

	complete_commit(c);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}