sti_gdp.c

/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
 *          Fabien Dessenne <fabien.dessenne@st.com>
 *          for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>

#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>

#include "sti_compositor.h"
#include "sti_gdp.h"
#include "sti_plane.h"
#include "sti_vtg.h"

#define ALPHASWITCH     BIT(6)
#define ENA_COLOR_FILL  BIT(8)
#define BIGNOTLITTLE    BIT(23)
#define WAIT_NEXT_VSYNC BIT(31)

/* GDP color formats */
#define GDP_RGB565      0x00
#define GDP_RGB888      0x01
#define GDP_RGB888_32   0x02
#define GDP_XBGR8888    (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565    0x04
#define GDP_ARGB8888    0x05
#define GDP_ABGR8888    (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555    0x06
#define GDP_ARGB4444    0x07
#define GDP_CLUT8       0x0B
#define GDP_YCBR888     0x10
#define GDP_YCBR422R    0x12
#define GDP_AYCBR8888   0x15

#define GAM_GDP_CTL_OFFSET      0x00
#define GAM_GDP_AGC_OFFSET      0x04
#define GAM_GDP_VPO_OFFSET      0x0C
#define GAM_GDP_VPS_OFFSET      0x10
#define GAM_GDP_PML_OFFSET      0x14
#define GAM_GDP_PMP_OFFSET      0x18
#define GAM_GDP_SIZE_OFFSET     0x1C
#define GAM_GDP_NVN_OFFSET      0x24
#define GAM_GDP_KEY1_OFFSET     0x28
#define GAM_GDP_KEY2_OFFSET     0x2C
#define GAM_GDP_PPT_OFFSET      0x34
#define GAM_GDP_CML_OFFSET      0x3C
#define GAM_GDP_MST_OFFSET      0x68

#define GAM_GDP_ALPHARANGE_255  BIT(5)
#define GAM_GDP_AGC_FULL_RANGE  0x00808080
#define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
#define GAM_GDP_SIZE_MAX        0x7FF

#define GDP_NODE_NB_BANK        2
#define GDP_NODE_PER_FIELD      2
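
/*
 * Layout of one GDP processing node as fetched by the hardware: the fields
 * follow the GAM_GDP_*_OFFSET map above, with reserved words padding the
 * unused offsets.
 */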
struct sti_gdp_node {
        u32 gam_gdp_ctl;
        u32 gam_gdp_agc;
        u32 reserved1;
        u32 gam_gdp_vpo;
        u32 gam_gdp_vps;
        u32 gam_gdp_pml;
        u32 gam_gdp_pmp;
        u32 gam_gdp_size;
        u32 reserved2;
        u32 gam_gdp_nvn;
        u32 gam_gdp_key1;
        u32 gam_gdp_key2;
        u32 reserved3;
        u32 gam_gdp_ppt;
        u32 reserved4;
        u32 gam_gdp_cml;
};
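
/*
 * A node list pairs the top-field and bottom-field nodes of one bank,
 * together with the DMA addresses the hardware uses to reach them.
 */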
struct sti_gdp_node_list {
        struct sti_gdp_node *top_field;
        dma_addr_t top_field_paddr;
        struct sti_gdp_node *btm_field;
        dma_addr_t btm_field_paddr;
};

/**
 * STI GDP structure
 *
 * @plane:            sti_plane structure
 * @dev:              driver device
 * @regs:             gdp registers
 * @clk_pix:          pixel clock for the current gdp
 * @clk_main_parent:  gdp parent clock if main path used
 * @clk_aux_parent:   gdp parent clock if aux path used
 * @vtg_field_nb:     callback for VTG FIELD (top or bottom) notification
 * @is_curr_top:      true if the current node processed is the top field
 * @node_list:        array of node lists
 */
struct sti_gdp {
        struct sti_plane plane;
        struct device *dev;
        void __iomem *regs;
        struct clk *clk_pix;
        struct clk *clk_main_parent;
        struct clk *clk_aux_parent;
        struct notifier_block vtg_field_nb;
        bool is_curr_top;
        struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
};

#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
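
/* DRM pixel formats that sti_gdp_fourcc2format() can map to a GDP format */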
static const uint32_t gdp_supported_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_ARGB4444,
        DRM_FORMAT_ARGB1555,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_RGB888,
        DRM_FORMAT_AYUV,
        DRM_FORMAT_YUV444,
        DRM_FORMAT_VYUY,
        DRM_FORMAT_C8,
};

static int sti_gdp_fourcc2format(int fourcc)
{
        switch (fourcc) {
        case DRM_FORMAT_XRGB8888:
                return GDP_RGB888_32;
        case DRM_FORMAT_XBGR8888:
                return GDP_XBGR8888;
        case DRM_FORMAT_ARGB8888:
                return GDP_ARGB8888;
        case DRM_FORMAT_ABGR8888:
                return GDP_ABGR8888;
        case DRM_FORMAT_ARGB4444:
                return GDP_ARGB4444;
        case DRM_FORMAT_ARGB1555:
                return GDP_ARGB1555;
        case DRM_FORMAT_RGB565:
                return GDP_RGB565;
        case DRM_FORMAT_RGB888:
                return GDP_RGB888;
        case DRM_FORMAT_AYUV:
                return GDP_AYCBR8888;
        case DRM_FORMAT_YUV444:
                return GDP_YCBR888;
        case DRM_FORMAT_VYUY:
                return GDP_YCBR422R;
        case DRM_FORMAT_C8:
                return GDP_CLUT8;
        }
        return -1;
}
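
/* Formats carrying an 8-bit alpha component use the full 0-255 alpha range */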
static int sti_gdp_get_alpharange(int format)
{
        switch (format) {
        case GDP_ARGB8565:
        case GDP_ARGB8888:
        case GDP_AYCBR8888:
        case GDP_ABGR8888:
                return GAM_GDP_ALPHARANGE_255;
        }
        return 0;
}

/**
 * sti_gdp_get_free_nodes
 * @gdp: gdp pointer
 *
 * Look for a GDP node list that is not currently read by the HW.
 *
 * RETURNS:
 * Pointer to the free GDP node list
 */
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
{
        int hw_nvn;
        unsigned int i;

        hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
        if (!hw_nvn)
                goto end;

        for (i = 0; i < GDP_NODE_NB_BANK; i++)
                if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
                    (hw_nvn != gdp->node_list[i].top_field_paddr))
                        return &gdp->node_list[i];

        /* in hazardous cases restart with the first node */
        DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
                  sti_plane_to_str(&gdp->plane), hw_nvn);

end:
        return &gdp->node_list[0];
}

/**
 * sti_gdp_get_current_nodes
 * @gdp: gdp pointer
 *
 * Look for GDP nodes that are currently read by the HW.
 *
 * RETURNS:
 * Pointer to the current GDP node list
 */
static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
{
        int hw_nvn;
        unsigned int i;

        hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
        if (!hw_nvn)
                goto end;

        for (i = 0; i < GDP_NODE_NB_BANK; i++)
                if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
                    (hw_nvn == gdp->node_list[i].top_field_paddr))
                        return &gdp->node_list[i];

end:
        DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
                         hw_nvn, sti_plane_to_str(&gdp->plane));
        return NULL;
}

/**
 * sti_gdp_disable
 * @gdp: gdp pointer
 *
 * Disable a GDP.
 */
static void sti_gdp_disable(struct sti_gdp *gdp)
{
        struct drm_plane *drm_plane = &gdp->plane.drm_plane;
        struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
        struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
        unsigned int i;

        DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));

        /* Set the nodes as 'to be ignored on mixer' */
        for (i = 0; i < GDP_NODE_NB_BANK; i++) {
                gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
                gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
        }

        if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
                        compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
                DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");

        if (gdp->clk_pix)
                clk_disable_unprepare(gdp->clk_pix);

        gdp->plane.status = STI_PLANE_DISABLED;
}

/**
 * sti_gdp_field_cb
 * @nb: notifier block
 * @event: event message
 * @data: private data
 *
 * Handle VTG top field and bottom field event.
 *
 * RETURNS:
 * 0 on success.
 */
int sti_gdp_field_cb(struct notifier_block *nb,
                     unsigned long event, void *data)
{
        struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);

        if (gdp->plane.status == STI_PLANE_FLUSHING) {
                /* disabling must be synchronized with the vsync event */
                DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
                                 sti_plane_to_str(&gdp->plane));
                sti_gdp_disable(gdp);
        }

        switch (event) {
        case VTG_TOP_FIELD_EVENT:
                gdp->is_curr_top = true;
                break;
        case VTG_BOTTOM_FIELD_EVENT:
                gdp->is_curr_top = false;
                break;
        default:
                DRM_ERROR("unsupported event: %lu\n", event);
                break;
        }

        return 0;
}
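
/*
 * Allocate and carve up the DMA memory holding the GDP node banks and, on
 * STiH407, retrieve the per-GDP pixel clock and its parent clocks.
 */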
static void sti_gdp_init(struct sti_gdp *gdp)
{
        struct device_node *np = gdp->dev->of_node;
        dma_addr_t dma_addr;
        void *base;
        unsigned int i, size;

        /* Allocate all the nodes within a single memory page */
        size = sizeof(struct sti_gdp_node) *
               GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
        base = dma_alloc_writecombine(gdp->dev,
                                      size, &dma_addr, GFP_KERNEL | GFP_DMA);
        if (!base) {
                DRM_ERROR("Failed to allocate memory for GDP node\n");
                return;
        }
        memset(base, 0, size);
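
        /*
         * Carve the allocation into GDP_NODE_NB_BANK banks of one top-field
         * and one bottom-field node each; node addresses must be 16-byte
         * aligned for the hardware.
         */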
        for (i = 0; i < GDP_NODE_NB_BANK; i++) {
                if (dma_addr & 0xF) {
                        DRM_ERROR("Mem alignment failed\n");
                        return;
                }
                gdp->node_list[i].top_field = base;
                gdp->node_list[i].top_field_paddr = dma_addr;

                DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
                base += sizeof(struct sti_gdp_node);
                dma_addr += sizeof(struct sti_gdp_node);

                if (dma_addr & 0xF) {
                        DRM_ERROR("Mem alignment failed\n");
                        return;
                }
                gdp->node_list[i].btm_field = base;
                gdp->node_list[i].btm_field_paddr = dma_addr;

                DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
                base += sizeof(struct sti_gdp_node);
                dma_addr += sizeof(struct sti_gdp_node);
        }

        if (of_device_is_compatible(np, "st,stih407-compositor")) {
                /* Each GDP of the STiH407 chip has its own pixel clock */
                char *clk_name;

                switch (gdp->plane.desc) {
                case STI_GDP_0:
                        clk_name = "pix_gdp1";
                        break;
                case STI_GDP_1:
                        clk_name = "pix_gdp2";
                        break;
                case STI_GDP_2:
                        clk_name = "pix_gdp3";
                        break;
                case STI_GDP_3:
                        clk_name = "pix_gdp4";
                        break;
                default:
                        DRM_ERROR("GDP id not recognized\n");
                        return;
                }

                gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
                if (IS_ERR(gdp->clk_pix))
                        DRM_ERROR("Cannot get %s clock\n", clk_name);

                gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
                if (IS_ERR(gdp->clk_main_parent))
                        DRM_ERROR("Cannot get main_parent clock\n");

                gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
                if (IS_ERR(gdp->clk_aux_parent))
                        DRM_ERROR("Cannot get aux_parent clock\n");
        }
}
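
/*
 * Build the top-field and bottom-field nodes of a free bank for the new
 * plane state, then chain them into the node list currently scanned by the
 * hardware so the update takes effect at the next VSYNC.
 */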
static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
                                  struct drm_plane_state *oldstate)
{
        struct drm_plane_state *state = drm_plane->state;
        struct sti_plane *plane = to_sti_plane(drm_plane);
        struct sti_gdp *gdp = to_sti_gdp(plane);
        struct drm_crtc *crtc = state->crtc;
        struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
        struct drm_framebuffer *fb = state->fb;
        bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
        struct sti_mixer *mixer;
        struct drm_display_mode *mode;
        int dst_x, dst_y, dst_w, dst_h;
        int src_x, src_y, src_w, src_h;
        struct drm_gem_cma_object *cma_obj;
        struct sti_gdp_node_list *list;
        struct sti_gdp_node_list *curr_list;
        struct sti_gdp_node *top_field, *btm_field;
        u32 dma_updated_top;
        u32 dma_updated_btm;
        int format;
        unsigned int depth, bpp;
        u32 ydo, xdo, yds, xds;
        int res;

        /* Manage the case where crtc is null (disabled) */
        if (!crtc)
                return;

        mixer = to_sti_mixer(crtc);
        mode = &crtc->mode;
        dst_x = state->crtc_x;
        dst_y = state->crtc_y;
        dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
        dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
        /* src_* coordinates are in 16.16 fixed-point format */
        src_x = state->src_x >> 16;
        src_y = state->src_y >> 16;
        src_w = state->src_w >> 16;
        src_h = state->src_h >> 16;

        DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
                      crtc->base.id, sti_mixer_to_str(mixer),
                      drm_plane->base.id, sti_plane_to_str(plane));
        DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
                      sti_plane_to_str(plane),
                      dst_w, dst_h, dst_x, dst_y,
                      src_w, src_h, src_x, src_y);

        list = sti_gdp_get_free_nodes(gdp);
        top_field = list->top_field;
        btm_field = list->btm_field;

        dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
                sti_plane_to_str(plane), top_field, btm_field);

        /* build the top field */
        top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
        top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
        format = sti_gdp_fourcc2format(fb->pixel_format);
        if (format == -1) {
                DRM_ERROR("Format not supported by GDP %.4s\n",
                          (char *)&fb->pixel_format);
                return;
        }
        top_field->gam_gdp_ctl |= format;
        top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
        top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

        cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
        if (!cma_obj) {
                DRM_ERROR("Can't get CMA GEM object for fb\n");
                return;
        }

        DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
                         (char *)&fb->pixel_format,
                         (unsigned long)cma_obj->paddr);

        /* pixel memory location */
        drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
        top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
        top_field->gam_gdp_pml += src_x * (bpp >> 3);
        top_field->gam_gdp_pml += src_y * fb->pitches[0];

        /* input parameters */
        top_field->gam_gdp_pmp = fb->pitches[0];
        top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
                                  clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);

        /* output parameters */
        ydo = sti_vtg_get_line_number(*mode, dst_y);
        yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
        xdo = sti_vtg_get_pixel_number(*mode, dst_x);
        xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
        top_field->gam_gdp_vpo = (ydo << 16) | xdo;
        top_field->gam_gdp_vps = (yds << 16) | xds;

        /* Same content and chained together */
        memcpy(btm_field, top_field, sizeof(*btm_field));
        top_field->gam_gdp_nvn = list->btm_field_paddr;
        btm_field->gam_gdp_nvn = list->top_field_paddr;

        /* Interlaced mode */
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
                                         fb->pitches[0];

        if (first_prepare) {
                /* Register gdp callback */
                if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
                                compo->vtg_main : compo->vtg_aux,
                                &gdp->vtg_field_nb, crtc)) {
                        DRM_ERROR("Cannot register VTG notifier\n");
                        return;
                }

                /* Set and enable gdp clock */
                if (gdp->clk_pix) {
                        struct clk *clkp;
                        int rate = mode->clock * 1000;

                        /* According to the mixer used, the gdp pixel clock
                         * should have a different parent clock. */
                        if (mixer->id == STI_MIXER_MAIN)
                                clkp = gdp->clk_main_parent;
                        else
                                clkp = gdp->clk_aux_parent;

                        if (clkp)
                                clk_set_parent(gdp->clk_pix, clkp);

                        res = clk_set_rate(gdp->clk_pix, rate);
                        if (res < 0) {
                                DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
                                          rate);
                                return;
                        }

                        if (clk_prepare_enable(gdp->clk_pix)) {
                                DRM_ERROR("Failed to prepare/enable gdp\n");
                                return;
                        }
                }
        }

        /* Update the NVN field of the 'right' field of the current GDP node
         * (being used by the HW) with the address of the updated ('free') top
         * field GDP node.
         * - In interlaced mode the 'right' field is the bottom field as we
         *   update frames starting from their top field
         * - In progressive mode, we update both bottom and top fields which
         *   are equal nodes.
         * At the next VSYNC, the updated node list will be used by the HW.
         */
        curr_list = sti_gdp_get_current_nodes(gdp);
        dma_updated_top = list->top_field_paddr;
        dma_updated_btm = list->btm_field_paddr;

        dev_dbg(gdp->dev, "Current NVN:0x%X\n",
                readl(gdp->regs + GAM_GDP_NVN_OFFSET));
        dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
                (unsigned long)cma_obj->paddr,
                readl(gdp->regs + GAM_GDP_PML_OFFSET));

        if (!curr_list) {
                /* First update or invalid node should directly write in the
                 * hw register */
                DRM_DEBUG_DRIVER("%s first update (or invalid node)",
                                 sti_plane_to_str(plane));

                writel(gdp->is_curr_top ?
                                dma_updated_btm : dma_updated_top,
                                gdp->regs + GAM_GDP_NVN_OFFSET);
                goto end;
        }

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                if (gdp->is_curr_top) {
                        /* Do not update in the middle of the frame, but
                         * postpone the update after the bottom field has
                         * been displayed */
                        curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
                } else {
                        /* Direct update to avoid one frame delay */
                        writel(dma_updated_top,
                               gdp->regs + GAM_GDP_NVN_OFFSET);
                }
        } else {
                /* Direct update for progressive to avoid one frame delay */
                writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
        }

end:
        plane->status = STI_PLANE_UPDATED;
}

static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
                                   struct drm_plane_state *oldstate)
{
        struct sti_plane *plane = to_sti_plane(drm_plane);
        struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);

        if (!drm_plane->crtc) {
                DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
                                 drm_plane->base.id);
                return;
        }

        DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
                         drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
                         drm_plane->base.id, sti_plane_to_str(plane));

        plane->status = STI_PLANE_DISABLING;
}

static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
        .atomic_update = sti_gdp_atomic_update,
        .atomic_disable = sti_gdp_atomic_disable,
};
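
/*
 * Allocate a GDP, set up its node lists and clocks, and register it as a
 * universal DRM plane.
 */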
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
                                 struct device *dev, int desc,
                                 void __iomem *baseaddr,
                                 unsigned int possible_crtcs,
                                 enum drm_plane_type type)
{
        struct sti_gdp *gdp;
        int res;

        gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
        if (!gdp) {
                DRM_ERROR("Failed to allocate memory for GDP\n");
                return NULL;
        }

        gdp->dev = dev;
        gdp->regs = baseaddr;
        gdp->plane.desc = desc;
        gdp->plane.status = STI_PLANE_DISABLED;

        gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;

        sti_gdp_init(gdp);

        res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
                                       possible_crtcs,
                                       &sti_plane_helpers_funcs,
                                       gdp_supported_formats,
                                       ARRAY_SIZE(gdp_supported_formats),
                                       type);
        if (res) {
                DRM_ERROR("Failed to initialize universal plane\n");
                goto err;
        }

        drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);

        sti_plane_init_property(&gdp->plane, type);

        return &gdp->plane.drm_plane;

err:
        devm_kfree(dev, gdp);
        return NULL;
}