bdisp-hw.c

/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/delay.h>

#include "bdisp.h"
#include "bdisp-filter.h"
#include "bdisp-reg.h"

/* Max width of the source frame in a single node */
#define MAX_SRC_WIDTH		2048

/* Reset & boot poll config */
#define POLL_RST_MAX		50
#define POLL_RST_DELAY_MS	20
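
/* Worst case, the reset poll below waits POLL_RST_MAX * POLL_RST_DELAY_MS = 1 second */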

enum bdisp_target_plan {
	BDISP_RGB,
	BDISP_Y,
	BDISP_CBCR
};
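
/*
 * h_inc and v_inc below are 6.10 fixed point: 10 fractional bits, so 0x400 is
 * 1.0 (no resize), 0x800 is a 2x downscale and 0x200 a 2x upscale.
 */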

struct bdisp_op_cfg {
	bool cconv;		/* RGB - YUV conversion */
	bool hflip;		/* Horizontal flip */
	bool vflip;		/* Vertical flip */
	bool wide;		/* Wide (>MAX_SRC_WIDTH) */
	bool scale;		/* Scale */
	u16 h_inc;		/* Horizontal increment in 6.10 format */
	u16 v_inc;		/* Vertical increment in 6.10 format */
	bool src_interlaced;	/* is the src an interlaced buffer */
	u8 src_nbp;		/* nb of planes of the src */
	bool src_yuv;		/* is the src a YUV color format */
	bool src_420;		/* is the src 4:2:0 chroma subsampled */
	u8 dst_nbp;		/* nb of planes of the dst */
	bool dst_yuv;		/* is the dst a YUV color format */
	bool dst_420;		/* is the dst 4:2:0 chroma subsampled */
};

struct bdisp_filter_addr {
	u16 min;		/* Filter min scale factor (6.10 fixed point) */
	u16 max;		/* Filter max scale factor (6.10 fixed point) */
	void *virt;		/* Virtual address for filter table */
	dma_addr_t paddr;	/* Physical address for filter table */
};

static struct bdisp_filter_addr bdisp_h_filter[NB_H_FILTER];
static struct bdisp_filter_addr bdisp_v_filter[NB_V_FILTER];

/**
 * bdisp_hw_reset
 * @bdisp: bdisp entity
 *
 * Resets HW
 *
 * RETURNS:
 * 0 on success.
 */
int bdisp_hw_reset(struct bdisp_dev *bdisp)
{
	unsigned int i;

	dev_dbg(bdisp->dev, "%s\n", __func__);

	/* Mask Interrupt */
	writel(0, bdisp->regs + BLT_ITM0);

	/* Reset */
	writel(readl(bdisp->regs + BLT_CTL) | BLT_CTL_RESET,
	       bdisp->regs + BLT_CTL);
	writel(0, bdisp->regs + BLT_CTL);

	/* Wait for reset done */
	for (i = 0; i < POLL_RST_MAX; i++) {
		if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE)
			break;
		msleep(POLL_RST_DELAY_MS);
	}
	if (i == POLL_RST_MAX)
		dev_err(bdisp->dev, "Reset timeout\n");

	return (i == POLL_RST_MAX) ? -EAGAIN : 0;
}

/**
 * bdisp_hw_get_and_clear_irq
 * @bdisp: bdisp entity
 *
 * Read then reset interrupt status
 *
 * RETURNS:
 * 0 if expected interrupt was raised.
 */
int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp)
{
	u32 its;

	its = readl(bdisp->regs + BLT_ITS);

	/* Check for the only expected IT: LastNode of AQ1 */
	if (!(its & BLT_ITS_AQ1_LNA)) {
		dev_dbg(bdisp->dev, "Unexpected IT status: 0x%08X\n", its);
		writel(its, bdisp->regs + BLT_ITS);
		return -1;
	}

	/* Clear and mask */
	writel(its, bdisp->regs + BLT_ITS);
	writel(0, bdisp->regs + BLT_ITM0);

	return 0;
}

/**
 * bdisp_hw_free_nodes
 * @ctx: bdisp context
 *
 * Free node memory
 *
 * RETURNS:
 * None
 */
void bdisp_hw_free_nodes(struct bdisp_ctx *ctx)
{
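	/*
	 * All nodes were allocated as one contiguous DMA area (see
	 * bdisp_hw_alloc_nodes), so freeing node[0] releases them all.
	 */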
	if (ctx && ctx->node[0]) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		dma_free_attrs(ctx->bdisp_dev->dev,
			       sizeof(struct bdisp_node) * MAX_NB_NODE,
			       ctx->node[0], ctx->node_paddr[0], &attrs);
	}
}

/**
 * bdisp_hw_alloc_nodes
 * @ctx: bdisp context
 *
 * Allocate dma memory for nodes
 *
 * RETURNS:
 * 0 on success
 */
int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx)
{
	struct device *dev = ctx->bdisp_dev->dev;
	unsigned int i, node_size = sizeof(struct bdisp_node);
	void *base;
	dma_addr_t paddr;
	DEFINE_DMA_ATTRS(attrs);

	/* Allocate all the nodes within a single memory page */
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
			       GFP_KERNEL | GFP_DMA, &attrs);
	if (!base) {
		dev_err(dev, "%s no mem\n", __func__);
		return -ENOMEM;
	}

	memset(base, 0, node_size * MAX_NB_NODE);

	for (i = 0; i < MAX_NB_NODE; i++) {
		ctx->node[i] = base;
		ctx->node_paddr[i] = paddr;
		dev_dbg(dev, "node[%d]=0x%p (paddr=%pad)\n", i, ctx->node[i],
			&paddr);
		base += node_size;
		paddr += node_size;
	}

	return 0;
}

/**
 * bdisp_hw_free_filters
 * @dev: device
 *
 * Free filters memory
 *
 * RETURNS:
 * None
 */
void bdisp_hw_free_filters(struct device *dev)
{
	int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
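
	/* Filters share one contiguous DMA area that starts at bdisp_h_filter[0] */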
	if (bdisp_h_filter[0].virt) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
			       bdisp_h_filter[0].paddr, &attrs);
	}
}

/**
 * bdisp_hw_alloc_filters
 * @dev: device
 *
 * Allocate dma memory for filters
 *
 * RETURNS:
 * 0 on success
 */
int bdisp_hw_alloc_filters(struct device *dev)
{
	unsigned int i, size;
	void *base;
	dma_addr_t paddr;
	DEFINE_DMA_ATTRS(attrs);

	/* Allocate all the filters within a single memory page */
	size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs);
	if (!base)
		return -ENOMEM;

	/* Setup filter addresses */
	for (i = 0; i < NB_H_FILTER; i++) {
		bdisp_h_filter[i].min = bdisp_h_spec[i].min;
		bdisp_h_filter[i].max = bdisp_h_spec[i].max;
		memcpy(base, bdisp_h_spec[i].coef, BDISP_HF_NB);
		bdisp_h_filter[i].virt = base;
		bdisp_h_filter[i].paddr = paddr;
		base += BDISP_HF_NB;
		paddr += BDISP_HF_NB;
	}

	for (i = 0; i < NB_V_FILTER; i++) {
		bdisp_v_filter[i].min = bdisp_v_spec[i].min;
		bdisp_v_filter[i].max = bdisp_v_spec[i].max;
		memcpy(base, bdisp_v_spec[i].coef, BDISP_VF_NB);
		bdisp_v_filter[i].virt = base;
		bdisp_v_filter[i].paddr = paddr;
		base += BDISP_VF_NB;
		paddr += BDISP_VF_NB;
	}

	return 0;
}

/**
 * bdisp_hw_get_hf_addr
 * @inc: resize increment
 *
 * Find the horizontal filter table that fits the resize increment
 *
 * RETURNS:
 * table physical address
 */
static dma_addr_t bdisp_hw_get_hf_addr(u16 inc)
{
	unsigned int i;
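
	/* Pick the table whose (min, max] range contains inc; fall back to entry 0 */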
	for (i = NB_H_FILTER - 1; i > 0; i--)
		if ((bdisp_h_filter[i].min < inc) &&
		    (inc <= bdisp_h_filter[i].max))
			break;

	return bdisp_h_filter[i].paddr;
}

/**
 * bdisp_hw_get_vf_addr
 * @inc: resize increment
 *
 * Find the vertical filter table that fits the resize increment
 *
 * RETURNS:
 * table physical address
 */
static dma_addr_t bdisp_hw_get_vf_addr(u16 inc)
{
	unsigned int i;

	for (i = NB_V_FILTER - 1; i > 0; i--)
		if ((bdisp_v_filter[i].min < inc) &&
		    (inc <= bdisp_v_filter[i].max))
			break;

	return bdisp_v_filter[i].paddr;
}

/**
 * bdisp_hw_get_inc
 * @from: input size
 * @to: output size
 * @inc: resize increment in 6.10 format
 *
 * Computes the increment (inverse of scale) in 6.10 format
 *
 * RETURNS:
 * 0 on success
 */
static int bdisp_hw_get_inc(u32 from, u32 to, u16 *inc)
{
	u32 tmp;

	if (!to)
		return -EINVAL;

	if (to == from) {
		*inc = 1 << 10;
		return 0;
	}
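
	/* e.g. 1920 -> 960 gives (1920 << 10) / 960 = 0x800, i.e. 2.0 in 6.10 */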
	tmp = (from << 10) / to;
	if ((tmp > 0xFFFF) || (!tmp))
		/* overflow (downscale x 63) or too small (upscale x 1024) */
		return -EINVAL;

	*inc = (u16)tmp;

	return 0;
}

/**
 * bdisp_hw_get_hv_inc
 * @ctx: device context
 * @h_inc: horizontal increment
 * @v_inc: vertical increment
 *
 * Computes the horizontal & vertical increments (inverse of scale)
 *
 * RETURNS:
 * 0 on success
 */
static int bdisp_hw_get_hv_inc(struct bdisp_ctx *ctx, u16 *h_inc, u16 *v_inc)
{
	u32 src_w, src_h, dst_w, dst_h;

	src_w = ctx->src.crop.width;
	src_h = ctx->src.crop.height;
	dst_w = ctx->dst.crop.width;
	dst_h = ctx->dst.crop.height;

	if (bdisp_hw_get_inc(src_w, dst_w, h_inc) ||
	    bdisp_hw_get_inc(src_h, dst_h, v_inc)) {
		dev_err(ctx->bdisp_dev->dev,
			"scale factors failed (%dx%d)->(%dx%d)\n",
			src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	return 0;
}

/**
 * bdisp_hw_get_op_cfg
 * @ctx: device context
 * @c: operation configuration
 *
 * Check which blitter operations are expected and set the scaling increments
 *
 * RETURNS:
 * 0 on success
 */
static int bdisp_hw_get_op_cfg(struct bdisp_ctx *ctx, struct bdisp_op_cfg *c)
{
	struct device *dev = ctx->bdisp_dev->dev;
	struct bdisp_frame *src = &ctx->src;
	struct bdisp_frame *dst = &ctx->dst;

	if (src->width > MAX_SRC_WIDTH * MAX_VERTICAL_STRIDES) {
		dev_err(dev, "Image width out of HW caps\n");
		return -EINVAL;
	}

	c->wide = src->width > MAX_SRC_WIDTH;

	c->hflip = ctx->hflip;
	c->vflip = ctx->vflip;

	c->src_interlaced = (src->field == V4L2_FIELD_INTERLACED);

	c->src_nbp = src->fmt->nb_planes;
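	/* NV12 and YUV420 are the only YUV formats handled here, both 4:2:0 */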
	c->src_yuv = (src->fmt->pixelformat == V4L2_PIX_FMT_NV12) ||
		     (src->fmt->pixelformat == V4L2_PIX_FMT_YUV420);
	c->src_420 = c->src_yuv;

	c->dst_nbp = dst->fmt->nb_planes;
	c->dst_yuv = (dst->fmt->pixelformat == V4L2_PIX_FMT_NV12) ||
		     (dst->fmt->pixelformat == V4L2_PIX_FMT_YUV420);
	c->dst_420 = c->dst_yuv;

	c->cconv = (c->src_yuv != c->dst_yuv);

	if (bdisp_hw_get_hv_inc(ctx, &c->h_inc, &c->v_inc)) {
		dev_err(dev, "Scale factor out of HW caps\n");
		return -EINVAL;
	}

	/* Deinterlacing adjustment: stretch a field to a frame */
	if (c->src_interlaced)
		c->v_inc /= 2;

	if ((c->h_inc != (1 << 10)) || (c->v_inc != (1 << 10)))
		c->scale = true;
	else
		c->scale = false;

	return 0;
}

/**
 * bdisp_hw_color_format
 * @pixelformat: v4l2 pixel format
 *
 * Convert a v4l2 pixel format to the bdisp color format
 *
 * RETURNS:
 * bdisp pixel format
 */
static u32 bdisp_hw_color_format(u32 pixelformat)
{
	u32 ret;

	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
		ret = (BDISP_YUV_3B << BLT_TTY_COL_SHIFT);
		break;
	case V4L2_PIX_FMT_NV12:
		ret = (BDISP_NV12 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END;
		break;
	case V4L2_PIX_FMT_RGB565:
		ret = (BDISP_RGB565 << BLT_TTY_COL_SHIFT);
		break;
	case V4L2_PIX_FMT_XBGR32: /* This V4L format actually refers to xRGB */
		ret = (BDISP_XRGB8888 << BLT_TTY_COL_SHIFT);
		break;
	case V4L2_PIX_FMT_RGB24: /* RGB888 format */
		ret = (BDISP_RGB888 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END;
		break;
	case V4L2_PIX_FMT_ABGR32: /* This V4L format actually refers to ARGB */
	default:
		ret = (BDISP_ARGB8888 << BLT_TTY_COL_SHIFT) | BLT_TTY_ALPHA_R;
		break;
	}

	return ret;
}

/**
 * bdisp_hw_build_node
 * @ctx: device context
 * @cfg: operation configuration
 * @node: node to be set
 * @t_plan: whether the node refers to an RGB/Y or a CbCr plane
 * @src_x_offset: x offset in the source image
 *
 * Build a node
 *
 * RETURNS:
 * None
 */
static void bdisp_hw_build_node(struct bdisp_ctx *ctx,
				struct bdisp_op_cfg *cfg,
				struct bdisp_node *node,
				enum bdisp_target_plan t_plan, int src_x_offset)
{
	struct bdisp_frame *src = &ctx->src;
	struct bdisp_frame *dst = &ctx->dst;
	u16 h_inc, v_inc, yh_inc, yv_inc;
	struct v4l2_rect src_rect = src->crop;
	struct v4l2_rect dst_rect = dst->crop;
	int dst_x_offset;
	s32 dst_width = dst->crop.width;
	u32 src_fmt, dst_fmt;
	const u32 *ivmx;

	dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);

	memset(node, 0, sizeof(*node));

	/* Adjust src and dst areas wrt src_x_offset */
	src_rect.left += src_x_offset;
	src_rect.width -= src_x_offset;
	src_rect.width = min_t(__s32, MAX_SRC_WIDTH, src_rect.width);

	dst_x_offset = (src_x_offset * dst_width) / ctx->src.crop.width;
	dst_rect.left += dst_x_offset;
	dst_rect.width = (src_rect.width * dst_width) / ctx->src.crop.width;

	/* General */
	src_fmt = src->fmt->pixelformat;
	dst_fmt = dst->fmt->pixelformat;
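
	/*
	 * Node chaining: nip holds the bus address of the next node (0 means
	 * last); it is filled in later by bdisp_hw_build_all_nodes.
	 */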
	node->nip = 0;
	node->cic = BLT_CIC_ALL_GRP;
	node->ack = BLT_ACK_BYPASS_S2S3;

	switch (cfg->src_nbp) {
	case 1:
		/* Src2 = RGB / Src1 = Src3 = off */
		node->ins = BLT_INS_S1_OFF | BLT_INS_S2_MEM | BLT_INS_S3_OFF;
		break;
	case 2:
		/* Src3 = Y
		 * Src2 = CbCr or ColorFill if writing the Y plane
		 * Src1 = off */
		node->ins = BLT_INS_S1_OFF | BLT_INS_S3_MEM;
		if (t_plan == BDISP_Y)
			node->ins |= BLT_INS_S2_CF;
		else
			node->ins |= BLT_INS_S2_MEM;
		break;
	case 3:
	default:
		/* Src3 = Y
		 * Src2 = Cb or ColorFill if writing the Y plane
		 * Src1 = Cr or ColorFill if writing the Y plane */
		node->ins = BLT_INS_S3_MEM;
		if (t_plan == BDISP_Y)
			node->ins |= BLT_INS_S2_CF | BLT_INS_S1_CF;
		else
			node->ins |= BLT_INS_S2_MEM | BLT_INS_S1_MEM;
		break;
	}

	/* Color convert */
	node->ins |= cfg->cconv ? BLT_INS_IVMX : 0;
	/* Scale needed if scaling OR 4:2:0 up/downsampling */
	node->ins |= (cfg->scale || cfg->src_420 || cfg->dst_420) ?
			BLT_INS_SCALE : 0;

	/* Target */
	node->tba = (t_plan == BDISP_CBCR) ? dst->paddr[1] : dst->paddr[0];

	node->tty = dst->bytesperline;
	node->tty |= bdisp_hw_color_format(dst_fmt);
	node->tty |= BLT_TTY_DITHER;
	node->tty |= (t_plan == BDISP_CBCR) ? BLT_TTY_CHROMA : 0;
	node->tty |= cfg->hflip ? BLT_TTY_HSO : 0;
	node->tty |= cfg->vflip ? BLT_TTY_VSO : 0;

	if (cfg->dst_420 && (t_plan == BDISP_CBCR)) {
		/* 420 chroma downsampling */
		dst_rect.height /= 2;
		dst_rect.width /= 2;
		dst_rect.left /= 2;
		dst_rect.top /= 2;
		dst_x_offset /= 2;
		dst_width /= 2;
	}

	node->txy = cfg->vflip ? (dst_rect.height - 1) : dst_rect.top;
	node->txy <<= 16;
	node->txy |= cfg->hflip ? (dst_width - dst_x_offset - 1) :
			dst_rect.left;

	node->tsz = dst_rect.height << 16 | dst_rect.width;

	if (cfg->src_interlaced) {
		/* handle only the top field which is half height of a frame */
		src_rect.top /= 2;
		src_rect.height /= 2;
	}

	if (cfg->src_nbp == 1) {
		/* Src 2 : RGB */
		node->s2ba = src->paddr[0];

		node->s2ty = src->bytesperline;
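		/* Interlaced: double the pitch so reads step over the other field */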
		if (cfg->src_interlaced)
			node->s2ty *= 2;

		node->s2ty |= bdisp_hw_color_format(src_fmt);

		node->s2xy = src_rect.top << 16 | src_rect.left;
		node->s2sz = src_rect.height << 16 | src_rect.width;
	} else {
		/* Src 2 : Cb or CbCr */
		if (cfg->src_420) {
			/* 420 chroma upsampling */
			src_rect.top /= 2;
			src_rect.left /= 2;
			src_rect.width /= 2;
			src_rect.height /= 2;
		}

		node->s2ba = src->paddr[1];

		node->s2ty = src->bytesperline;
		if (cfg->src_nbp == 3)
			node->s2ty /= 2;
		if (cfg->src_interlaced)
			node->s2ty *= 2;
		node->s2ty |= bdisp_hw_color_format(src_fmt);

		node->s2xy = src_rect.top << 16 | src_rect.left;
		node->s2sz = src_rect.height << 16 | src_rect.width;

		if (cfg->src_nbp == 3) {
			/* Src 1 : Cr */
			node->s1ba = src->paddr[2];

			node->s1ty = node->s2ty;
			node->s1xy = node->s2xy;
		}

		/* Src 3 : Y */
		node->s3ba = src->paddr[0];

		node->s3ty = src->bytesperline;
		if (cfg->src_interlaced)
			node->s3ty *= 2;
		node->s3ty |= bdisp_hw_color_format(src_fmt);

		if ((t_plan != BDISP_CBCR) && cfg->src_420) {
			/* No chroma upsampling for output RGB / Y plane */
			node->s3xy = node->s2xy * 2;
			node->s3sz = node->s2sz * 2;
		} else {
			/* No need to read Y (Src3) when writing Chroma */
			node->s3ty |= BLT_S3TY_BLANK_ACC;
			node->s3xy = node->s2xy;
			node->s3sz = node->s2sz;
		}
	}

	/* Resize (scale OR 4:2:0 chroma up/downsampling) */
	if (node->ins & BLT_INS_SCALE) {
		/* no need to compute Y when writing CbCr from RGB input */
		bool skip_y = (t_plan == BDISP_CBCR) && !cfg->src_yuv;

		/* FCTL */
		if (cfg->scale) {
			node->fctl = BLT_FCTL_HV_SCALE;
			if (!skip_y)
				node->fctl |= BLT_FCTL_Y_HV_SCALE;
		} else {
			node->fctl = BLT_FCTL_HV_SAMPLE;
			if (!skip_y)
				node->fctl |= BLT_FCTL_Y_HV_SAMPLE;
		}

		/* RSF - Chroma may need to be up/downsampled */
		h_inc = cfg->h_inc;
		v_inc = cfg->v_inc;
		if (!cfg->src_420 && cfg->dst_420 && (t_plan == BDISP_CBCR)) {
			/* RGB to 4:2:0 for Chroma: downsample */
			h_inc *= 2;
			v_inc *= 2;
		} else if (cfg->src_420 && !cfg->dst_420) {
			/* 4:2:0 to RGB: upsample */
			h_inc /= 2;
			v_inc /= 2;
		}
		node->rsf = v_inc << 16 | h_inc;

		/* RZI */
		node->rzi = BLT_RZI_DEFAULT;

		/* Filter table physical addr */
		node->hfp = bdisp_hw_get_hf_addr(h_inc);
		node->vfp = bdisp_hw_get_vf_addr(v_inc);

		/* Y version */
		if (!skip_y) {
			yh_inc = cfg->h_inc;
			yv_inc = cfg->v_inc;

			node->y_rsf = yv_inc << 16 | yh_inc;
			node->y_rzi = BLT_RZI_DEFAULT;
			node->y_hfp = bdisp_hw_get_hf_addr(yh_inc);
			node->y_vfp = bdisp_hw_get_vf_addr(yv_inc);
		}
	}

	/* Versatile matrix for RGB / YUV conversion */
	if (cfg->cconv) {
		ivmx = cfg->src_yuv ? bdisp_yuv_to_rgb : bdisp_rgb_to_yuv;

		node->ivmx0 = ivmx[0];
		node->ivmx1 = ivmx[1];
		node->ivmx2 = ivmx[2];
		node->ivmx3 = ivmx[3];
	}
}

/**
 * bdisp_hw_build_all_nodes
 * @ctx: device context
 *
 * Build all the nodes for the blitter operation
 *
 * RETURNS:
 * 0 on success
 */
static int bdisp_hw_build_all_nodes(struct bdisp_ctx *ctx)
{
	struct bdisp_op_cfg cfg;
	unsigned int i, nid = 0;
	int src_x_offset = 0;

	for (i = 0; i < MAX_NB_NODE; i++)
		if (!ctx->node[i]) {
			dev_err(ctx->bdisp_dev->dev, "node %d is null\n", i);
			return -EINVAL;
		}

	/* Get configuration (scale, flip, ...) */
	if (bdisp_hw_get_op_cfg(ctx, &cfg))
		return -EINVAL;

	/* Split source in vertical strides (HW constraint) */
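	/* Each stride uses one RGB/Y node, plus one CbCr node for multi-plane output */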
	for (i = 0; i < MAX_VERTICAL_STRIDES; i++) {
		/* Build RGB/Y node and link it to the previous node */
		bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
				    cfg.dst_nbp == 1 ? BDISP_RGB : BDISP_Y,
				    src_x_offset);
		if (nid)
			ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
		nid++;

		/* Build additional Cb(Cr) node, link it to the previous one */
		if (cfg.dst_nbp > 1) {
			bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
					    BDISP_CBCR, src_x_offset);
			ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
			nid++;
		}

		/* Next stride until full width covered */
		src_x_offset += MAX_SRC_WIDTH;
		if (src_x_offset >= ctx->src.crop.width)
			break;
	}

	/* Mark last node as the last */
	ctx->node[nid - 1]->nip = 0;

	return 0;
}

/**
 * bdisp_hw_save_request
 * @ctx: device context
 *
 * Save a copy of the request and of the built nodes
 *
 * RETURNS:
 * None
 */
static void bdisp_hw_save_request(struct bdisp_ctx *ctx)
{
	struct bdisp_node **copy_node = ctx->bdisp_dev->dbg.copy_node;
	struct bdisp_request *request = &ctx->bdisp_dev->dbg.copy_request;
	struct bdisp_node **node = ctx->node;
	int i;

	/* Request copy */
	request->src = ctx->src;
	request->dst = ctx->dst;
	request->hflip = ctx->hflip;
	request->vflip = ctx->vflip;
	request->nb_req++;

	/* Nodes copy */
	for (i = 0; i < MAX_NB_NODE; i++) {
		/* Allocate memory if not done yet */
		if (!copy_node[i]) {
			copy_node[i] = devm_kzalloc(ctx->bdisp_dev->dev,
						    sizeof(*copy_node[i]),
						    GFP_KERNEL);
			if (!copy_node[i])
				return;
		}
		*copy_node[i] = *node[i];
	}
}

/**
 * bdisp_hw_update
 * @ctx: device context
 *
 * Send the request to the HW
 *
 * RETURNS:
 * 0 on success
 */
int bdisp_hw_update(struct bdisp_ctx *ctx)
{
	int ret;
	struct bdisp_dev *bdisp = ctx->bdisp_dev;
	struct device *dev = bdisp->dev;
	unsigned int node_id;

	dev_dbg(dev, "%s\n", __func__);

	/* build nodes */
	ret = bdisp_hw_build_all_nodes(ctx);
	if (ret) {
		dev_err(dev, "cannot build nodes (%d)\n", ret);
		return ret;
	}

	/* Save a copy of the request */
	bdisp_hw_save_request(ctx);

	/* Configure interrupt to 'Last Node Reached for AQ1' */
	writel(BLT_AQ1_CTL_CFG, bdisp->regs + BLT_AQ1_CTL);
	writel(BLT_ITS_AQ1_LNA, bdisp->regs + BLT_ITM0);

	/* Write first node addr */
	writel(ctx->node_paddr[0], bdisp->regs + BLT_AQ1_IP);

	/* Find and write last node addr : this starts the HW processing */
	for (node_id = 0; node_id < MAX_NB_NODE - 1; node_id++) {
		if (!ctx->node[node_id]->nip)
			break;
	}
	writel(ctx->node_paddr[node_id], bdisp->regs + BLT_AQ1_LNA);

	return 0;
}