  1. /*
  2. * Samsung TV Mixer driver
  3. *
  4. * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  5. *
  6. * Tomasz Stanislawski, <t.stanislaws@samsung.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
  11. * or (at your option) any later version
  12. */
  13. #include "mixer.h"
  14. #include "regs-mixer.h"
  15. #include "regs-vp.h"
  16. #include <linux/delay.h>
  17. /* Register access subroutines */
/* Read a 32-bit Video Processor register at offset @reg_id. */
static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.vp_regs + reg_id);
}
/* Write @val to the Video Processor register at offset @reg_id. */
static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.vp_regs + reg_id);
}
  26. static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
  27. u32 val, u32 mask)
  28. {
  29. u32 old = vp_read(mdev, reg_id);
  30. val = (val & mask) | (old & ~mask);
  31. writel(val, mdev->res.vp_regs + reg_id);
  32. }
/* Read a 32-bit mixer register at offset @reg_id. */
static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
{
	return readl(mdev->res.mxr_regs + reg_id);
}
/* Write @val to the mixer register at offset @reg_id. */
static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
	writel(val, mdev->res.mxr_regs + reg_id);
}
  41. static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
  42. u32 val, u32 mask)
  43. {
  44. u32 old = mxr_read(mdev, reg_id);
  45. val = (val & mask) | (old & ~mask);
  46. writel(val, mdev->res.mxr_regs + reg_id);
  47. }
/*
 * Enable/disable synchronisation of shadowed register updates with
 * vsync for both the mixer and the Video Processor.
 * NOTE(review): presumably, with @en set, register writes are latched
 * to the hardware only at the vertical sync point — confirm against
 * the S5P mixer datasheet.
 */
void mxr_vsync_set_update(struct mxr_device *mdev, int en)
{
	/* block update on vsync */
	mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
		MXR_STATUS_SYNC_ENABLE);
	vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
}
  55. static void __mxr_reg_vp_reset(struct mxr_device *mdev)
  56. {
  57. int tries = 100;
  58. vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
  59. for (tries = 100; tries; --tries) {
  60. /* waiting until VP_SRESET_PROCESSING is 0 */
  61. if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
  62. break;
  63. mdelay(10);
  64. }
  65. WARN(tries == 0, "failed to reset Video Processor\n");
  66. }
  67. static void mxr_reg_vp_default_filter(struct mxr_device *mdev);
/*
 * Bring the mixer and Video Processor to a known default state:
 * RGB888 output, 16-beat DMA bursts, default layer priorities, dark
 * gray background, opaque pre-multiplied graphic layers, VP soft
 * reset with default polyphase filters, and all interrupts enabled.
 * Runs under reg_slock with shadow updates blocked so the hardware
 * observes one consistent configuration at the next vsync.
 */
void mxr_reg_reset(struct mxr_device *mdev)
{
	unsigned long flags;
	u32 val; /* value stored to register */

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* set output in RGB888 mode */
	mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);

	/* 16 beat burst in DMA */
	mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
		MXR_STATUS_BURST_MASK);

	/* setting default layer priority: layer1 > video > layer0
	 * because typical usage scenario would be
	 * layer0 - framebuffer
	 * video - video overlay
	 * layer1 - OSD
	 */
	val = MXR_LAYER_CFG_GRP0_VAL(1);
	val |= MXR_LAYER_CFG_VP_VAL(2);
	val |= MXR_LAYER_CFG_GRP1_VAL(3);
	mxr_write(mdev, MXR_LAYER_CFG, val);

	/* use dark gray background color */
	mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR2, 0x808080);

	/* setting graphical layers */
	val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
	val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */

	/* the same configuration for both layers */
	mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
	mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);

	/* configuration of Video Processor Registers */
	__mxr_reg_vp_reset(mdev);
	mxr_reg_vp_default_filter(mdev);

	/* enable all interrupts */
	mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/*
 * Configure pixel format and geometry for graphic layer @idx:
 * format cookie, source span, width/height with scaling ratios,
 * and the offsets within the source and destination images.
 * All writes happen under reg_slock with vsync updates blocked.
 */
void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
	const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* setup format */
	mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
		MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);

	/* setup geometry */
	mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
	val = MXR_GRP_WH_WIDTH(geo->src.width);
	val |= MXR_GRP_WH_HEIGHT(geo->src.height);
	val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
	val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
	mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);

	/* setup offsets in source image */
	val = MXR_GRP_SXY_SX(geo->src.x_offset);
	val |= MXR_GRP_SXY_SY(geo->src.y_offset);
	mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);

	/* setup offsets in display image */
	val = MXR_GRP_DXY_DX(geo->dst.x_offset);
	val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
	mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/*
 * Configure Video Processor input format and source/destination
 * geometry.  For interlaced destinations the height and vertical
 * position are halved, since each field carries half the lines.
 */
void mxr_reg_vp_format(struct mxr_device *mdev,
	const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);

	/* setting size of input image */
	vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
		VP_IMG_VSIZE(geo->src.full_height));
	/* chroma height has to be reduced by 2 to avoid chroma distortions */
	vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
		VP_IMG_VSIZE(geo->src.full_height / 2));

	vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
	vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
	vp_write(mdev, VP_SRC_H_POSITION,
		VP_SRC_H_POSITION_VAL(geo->src.x_offset));
	vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);

	vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
	vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
	if (geo->dst.field == V4L2_FIELD_INTERLACED) {
		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
	} else {
		vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
		vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
	}

	vp_write(mdev, VP_H_RATIO, geo->x_ratio);
	vp_write(mdev, VP_V_RATIO, geo->y_ratio);

	vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
  169. void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
  170. {
  171. u32 val = addr ? ~0 : 0;
  172. unsigned long flags;
  173. spin_lock_irqsave(&mdev->reg_slock, flags);
  174. mxr_vsync_set_update(mdev, MXR_DISABLE);
  175. if (idx == 0)
  176. mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
  177. else
  178. mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
  179. mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);
  180. mxr_vsync_set_update(mdev, MXR_ENABLE);
  181. spin_unlock_irqrestore(&mdev->reg_slock, flags);
  182. }
/*
 * Program luma/chroma DMA addresses for the video plane and
 * enable/disable both the mixer VP path and the VP core, depending on
 * whether a luma buffer is present.  Index 0 holds the TOP-field
 * pointers, index 1 the BOTTOM-field pointers.
 */
void mxr_reg_vp_buffer(struct mxr_device *mdev,
	dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
{
	u32 val = luma_addr[0] ? ~0 : 0; /* all-ones enables, zero disables */
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
	vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);

	/* TODO: fix tiled mode */
	vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
	vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
	vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
	vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
  200. static void mxr_irq_layer_handle(struct mxr_layer *layer)
  201. {
  202. struct list_head *head = &layer->enq_list;
  203. struct mxr_buffer *done;
  204. /* skip non-existing layer */
  205. if (layer == NULL)
  206. return;
  207. spin_lock(&layer->enq_slock);
  208. if (layer->state == MXR_LAYER_IDLE)
  209. goto done;
  210. done = layer->shadow_buf;
  211. layer->shadow_buf = layer->update_buf;
  212. if (list_empty(head)) {
  213. if (layer->state != MXR_LAYER_STREAMING)
  214. layer->update_buf = NULL;
  215. } else {
  216. struct mxr_buffer *next;
  217. next = list_first_entry(head, struct mxr_buffer, list);
  218. list_del(&next->list);
  219. layer->update_buf = next;
  220. }
  221. layer->ops.buffer_set(layer, layer->update_buf);
  222. if (done && done != layer->shadow_buf)
  223. vb2_buffer_done(&done->vb.vb2_buf, VB2_BUF_STATE_DONE);
  224. done:
  225. spin_unlock(&layer->enq_slock);
  226. }
/*
 * Mixer interrupt handler.  Acknowledges pending interrupts; on vsync
 * it wakes vsync waiters, toggles the TOP-field flag in interlaced
 * mode, and rotates every layer's buffer queue (on the TOP field
 * only, so interlaced frames are not split between fields).
 */
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 i, val;

	spin_lock(&mdev->reg_slock);
	val = mxr_read(mdev, MXR_INT_STATUS);

	/* wake up process waiting for VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
		/* toggle TOP field event if working in interlaced mode */
		if (~mxr_read(mdev, MXR_CFG) & MXR_CFG_SCAN_PROGRASSIVE)
			change_bit(MXR_EVENT_TOP, &mdev->event_flags);
		wake_up(&mdev->event_queue);
		/* vsync interrupt use different bit for read and clear */
		val &= ~MXR_INT_STATUS_VSYNC;
		val |= MXR_INT_CLEAR_VSYNC;
	}

	/* clear interrupts */
	mxr_write(mdev, MXR_INT_STATUS, val);
	spin_unlock(&mdev->reg_slock);

	/* leave on non-vsync event */
	if (~val & MXR_INT_CLEAR_VSYNC)
		return IRQ_HANDLED;

	/* skip layer update on bottom field */
	if (!test_bit(MXR_EVENT_TOP, &mdev->event_flags))
		return IRQ_HANDLED;

	for (i = 0; i < MXR_MAX_LAYERS; ++i)
		mxr_irq_layer_handle(mdev->layer[i]);

	return IRQ_HANDLED;
}
  257. void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
  258. {
  259. u32 val;
  260. val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
  261. mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
  262. }
/*
 * Start the mixer pipeline.  Also marks the upcoming field as TOP so
 * the IRQ handler starts buffer rotation on the correct field.
 */
void mxr_reg_streamon(struct mxr_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	/* single write -> no need to block vsync update */

	/* start MIXER */
	mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
	set_bit(MXR_EVENT_TOP, &mdev->event_flags);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/* Stop the mixer pipeline by clearing the REG_RUN bit. */
void mxr_reg_streamoff(struct mxr_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	/* single write -> no need to block vsync update */

	/* stop MIXER */
	mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/*
 * Sleep until the next vsync interrupt arrives, with a 1-second
 * timeout.  Returns 0 on vsync, -ETIME on timeout.
 */
int mxr_reg_wait4vsync(struct mxr_device *mdev)
{
	long time_left;

	/* arm the flag; the IRQ handler sets it on the next vsync */
	clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
	/* TODO: consider adding interruptible */
	time_left = wait_event_timeout(mdev->event_queue,
		test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
		msecs_to_jiffies(1000));
	if (time_left > 0)
		return 0;
	mxr_warn(mdev, "no vsync detected - timeout\n");
	return -ETIME;
}
/*
 * Derive mixer/VP scan and colorspace configuration from a media bus
 * frame format: output colorspace (YUV444 for JPEG colorspace, RGB888
 * otherwise), interlace vs progressive scan, and the SD/HD scan mode
 * matching fmt->height (480/576/720/1080).
 */
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *fmt)
{
	u32 val = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* selecting colorspace accepted by output */
	if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
		val |= MXR_CFG_OUT_YUV444;
	else
		val |= MXR_CFG_OUT_RGB888;

	/* choosing between interlace and progressive mode */
	if (fmt->field == V4L2_FIELD_INTERLACED)
		val |= MXR_CFG_SCAN_INTERLACE;
	else
		val |= MXR_CFG_SCAN_PROGRASSIVE;

	/* choosing between proper HD and SD mode */
	if (fmt->height == 480)
		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
	else if (fmt->height == 576)
		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
	else if (fmt->height == 720)
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	else if (fmt->height == 1080)
		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
	else
		WARN(1, "unrecognized mbus height %u!\n", fmt->height);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK |
		MXR_CFG_OUT_MASK);

	/* enable VP line skip and field toggling only for interlaced video */
	val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
	vp_write_mask(mdev, VP_MODE, val,
		VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/* Stream on/off hook for graphic layers — hardware needs no action. */
void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
{
	/* no extra actions need to be done */
}
/* Stream on/off hook for the video layer — hardware needs no action. */
void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
{
	/* no extra actions need to be done */
}
/*
 * Default horizontal 8-tap luma filter coefficients.
 * Negative literals are stored as two's-complement bytes in the u8
 * array; mxr_reg_vp_filter_set() packs them four per 32-bit register.
 */
static const u8 filter_y_horiz_tap8[] = {
	0, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, 0, 0, 0,
	0, 2, 4, 5, 6, 6, 6, 6,
	6, 5, 5, 4, 3, 2, 1, 1,
	0, -6, -12, -16, -18, -20, -21, -20,
	-20, -18, -16, -13, -10, -8, -5, -2,
	127, 126, 125, 121, 114, 107, 99, 89,
	79, 68, 57, 46, 35, 25, 16, 8,
};
/*
 * Default vertical 4-tap luma filter coefficients (two's-complement
 * bytes, packed four per register by mxr_reg_vp_filter_set()).
 */
static const u8 filter_y_vert_tap4[] = {
	0, -3, -6, -8, -8, -8, -8, -7,
	-6, -5, -4, -3, -2, -1, -1, 0,
	127, 126, 124, 118, 111, 102, 92, 81,
	70, 59, 48, 37, 27, 19, 11, 5,
	0, 5, 11, 19, 27, 37, 48, 59,
	70, 81, 92, 102, 111, 118, 124, 126,
	0, 0, -1, -1, -2, -3, -4, -5,
	-6, -7, -8, -8, -8, -8, -6, -3,
};
/*
 * Default horizontal 4-tap chroma filter coefficients
 * (two's-complement bytes, packed by mxr_reg_vp_filter_set()).
 */
static const u8 filter_cr_horiz_tap4[] = {
	0, -3, -6, -8, -8, -8, -8, -7,
	-6, -5, -4, -3, -2, -1, -1, 0,
	127, 126, 124, 118, 111, 102, 92, 81,
	70, 59, 48, 37, 27, 19, 11, 5,
};
  365. static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
  366. int reg_id, const u8 *data, unsigned int size)
  367. {
  368. /* assure 4-byte align */
  369. BUG_ON(size & 3);
  370. for (; size; size -= 4, reg_id += 4, data += 4) {
  371. u32 val = (data[0] << 24) | (data[1] << 16) |
  372. (data[2] << 8) | data[3];
  373. vp_write(mdev, reg_id, val);
  374. }
  375. }
/* Load the built-in default luma/chroma filter tables into the VP. */
static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
{
	mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
		filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
	mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
		filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
	mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
		filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}
/* Dump all mixer registers to the debug log via mxr_dbg(). */
static void mxr_reg_mxr_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32)readl(mdev->res.mxr_regs + reg_id)); \
} while (0)

	DUMPREG(MXR_STATUS);
	DUMPREG(MXR_CFG);
	DUMPREG(MXR_INT_EN);
	DUMPREG(MXR_INT_STATUS);

	DUMPREG(MXR_LAYER_CFG);
	DUMPREG(MXR_VIDEO_CFG);

	DUMPREG(MXR_GRAPHIC0_CFG);
	DUMPREG(MXR_GRAPHIC0_BASE);
	DUMPREG(MXR_GRAPHIC0_SPAN);
	DUMPREG(MXR_GRAPHIC0_WH);
	DUMPREG(MXR_GRAPHIC0_SXY);
	DUMPREG(MXR_GRAPHIC0_DXY);

	DUMPREG(MXR_GRAPHIC1_CFG);
	DUMPREG(MXR_GRAPHIC1_BASE);
	DUMPREG(MXR_GRAPHIC1_SPAN);
	DUMPREG(MXR_GRAPHIC1_WH);
	DUMPREG(MXR_GRAPHIC1_SXY);
	DUMPREG(MXR_GRAPHIC1_DXY);

#undef DUMPREG
}
/* Dump all Video Processor registers to the debug log via mxr_dbg(). */
static void mxr_reg_vp_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
	mxr_dbg(mdev, #reg_id " = %08x\n", \
		(u32) readl(mdev->res.vp_regs + reg_id)); \
} while (0)

	DUMPREG(VP_ENABLE);
	DUMPREG(VP_SRESET);
	DUMPREG(VP_SHADOW_UPDATE);
	DUMPREG(VP_FIELD_ID);
	DUMPREG(VP_MODE);
	DUMPREG(VP_IMG_SIZE_Y);
	DUMPREG(VP_IMG_SIZE_C);
	DUMPREG(VP_PER_RATE_CTRL);
	DUMPREG(VP_TOP_Y_PTR);
	DUMPREG(VP_BOT_Y_PTR);
	DUMPREG(VP_TOP_C_PTR);
	DUMPREG(VP_BOT_C_PTR);
	DUMPREG(VP_ENDIAN_MODE);
	DUMPREG(VP_SRC_H_POSITION);
	DUMPREG(VP_SRC_V_POSITION);
	DUMPREG(VP_SRC_WIDTH);
	DUMPREG(VP_SRC_HEIGHT);
	DUMPREG(VP_DST_H_POSITION);
	DUMPREG(VP_DST_V_POSITION);
	DUMPREG(VP_DST_WIDTH);
	DUMPREG(VP_DST_HEIGHT);
	DUMPREG(VP_H_RATIO);
	DUMPREG(VP_V_RATIO);

#undef DUMPREG
}
/* Dump both mixer and Video Processor register sets. */
void mxr_reg_dump(struct mxr_device *mdev)
{
	mxr_reg_mxr_dump(mdev);
	mxr_reg_vp_dump(mdev);
}