mdp4_crtc.c

/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp4_kms.h"

#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"
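
/* Per-CRTC driver state.  Each CRTC is bound to one overlay engine (ovlp)
 * and one DMA channel (dma) at init time, see mdp4_crtc_init().
 */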
struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;

	/* if there is a pending flip, this will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that have been flushed at the last commit, used to decide
	 * if a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};

#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}
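
/* Record pending work (PENDING_FLIP and/or PENDING_CURSOR) and arm the
 * vblank irq so the work gets completed from the vblank handler.
 */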
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}
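
/* Collect the flush bits for every pipe attached to this crtc plus the
 * overlay engine itself, and kick the flush so the double-buffered
 * registers get latched at the next vblank.
 */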
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	uint32_t flush = 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		flush |= pipe2flush(pipe_id);
	}

	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_crtc->flushed_mask = flush;

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}

/* if file != NULL, this is the preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on the file that requested the flip, then send
		 * the event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp4_crtc->event = NULL;
			DBG("%s: send event: %p", mdp4_crtc->name, event);
			drm_send_vblank_event(dev, mdp4_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
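
/* Runs from the flip_work (on the driver workqueue) once the old cursor
 * bo is no longer being scanned out: drop its iova ref and gem ref.
 */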
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);

	msm_gem_put_iova(val, mdp4_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);

	kfree(mdp4_crtc);
}

static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}

/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
	[VG1]  = 1,
	[VG2]  = 2,
	[RGB1] = 0,
	[RGB2] = 0,
	[RGB3] = 0,
	[VG3]  = 3,
	[VG4]  = 4,
};

/* setup mixer config, for which we need to consider all crtc's and
 * the planes attached to them
 *
 * TODO may possibly need some extra locking here
 */
static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
	struct drm_crtc *crtc;
	uint32_t mixer_cfg = 0;
	static const enum mdp_mixer_stage_id stages[] = {
			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
	};

	list_for_each_entry(crtc, &config->crtc_list, head) {
		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
		struct drm_plane *plane;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
			int idx = idxs[pipe_id];

			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
					pipe_id, stages[idx]);
		}
	}

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}
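
/* Program per-stage blending for the overlay engine: a stage whose plane
 * has an alpha-capable format blends with the foreground pixel alpha,
 * otherwise constant FG/BG alpha (0xff/0x00, i.e. opaque) is used.
 */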
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	int i, ovlp = mdp4_crtc->ovlp;
	bool alpha[4] = { false, false, false, false };

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		int idx = idxs[pipe_id];
		if (idx > 0) {
			const struct mdp_format *format =
					to_mdp_format(msm_framebuffer_format(plane->fb));
			alpha[idx-1] = format->alpha_enable;
		}
	}

	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	setup_mixer(mdp4_kms);
}
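
/* Program the DMA and overlay engine dimensions from the adjusted mode.
 * The DMA source base/stride are left at zero because the data comes
 * from the overlay pipe (see the "take data from pipe" write below).
 */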
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp4_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}

static void mdp4_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(!mdp4_crtc->enabled))
		return;

	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
	mdp4_disable(mdp4_kms);

	mdp4_crtc->enabled = false;
}

static void mdp4_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(mdp4_crtc->enabled))
		return;

	mdp4_enable(mdp4_kms);
	mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);

	crtc_flush(crtc);

	mdp4_crtc->enabled = true;
}

static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: check", mdp4_crtc->name);
	// TODO anything else to check?
	return 0;
}

static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: begin", mdp4_crtc->name);
}

static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

	WARN_ON(mdp4_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}

static int mdp4_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}
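
/* maximum cursor dimensions accepted by mdp4_crtc_cursor_set(): */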
#define CURSOR_WIDTH  64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint32_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_reference(next_bo);
			msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
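
/* Stage a new cursor bo (or none, if handle is zero).  The register
 * update itself is deferred to the vblank irq via PENDING_CURSOR, see
 * update_cursor() above.
 */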
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint32_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_unreference_unlocked(cursor_bo);
	return ret;
}

static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = mdp4_crtc_set_property,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.mode_fixup = mdp4_crtc_mode_fixup,
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.disable = mdp4_crtc_disable,
	.enable = mdp4_crtc_enable,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
};

static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}

static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}
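
/* Block (with a 50ms timeout) until the hardware has cleared our bits in
 * OVERLAY_FLUSH, i.e. the registers flushed at the last commit have been
 * latched.
 */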
static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int ret;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
			mdp4_crtc->flushed_mask),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);

	mdp4_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: cancel: %p", mdp4_crtc->name, file);
	complete_flip(crtc, file);
}

/* set dma config, i.e. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	/* wait_for_flush_done is the only case for now.
	 * Later we will have command mode CRTCs to wait on
	 * other events.
	 */
	mdp4_crtc_wait_for_flush_done(crtc);
}

static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};
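
/* Typical usage from the kms modeset init code (mdp4_kms.c); the pipe,
 * crtc id, overlay and dma ids below are illustrative, not prescriptive:
 *
 *	plane = mdp4_plane_init(dev, RGB2, true);
 *	...
 *	crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P);
 */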
/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;

	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
	if (!mdp4_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp4_crtc->base;

	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
	plane->crtc = crtc;

	return crtc;
}