saa7146_hlp.c
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <media/saa7146_vv.h>

static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
{
	/* clear out the necessary bits */
	*clip_format &= 0x0000ffff;
	/* set these bits new */
	*clip_format |= (( ((palette&0xf00)>>8) << 30) | ((palette&0x00f) << 24) | (((palette&0x0f0)>>4) << 16));
}
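/* Note (for orientation, derived from the code above and its callers):
   "palette" is the ->trans code of a struct saa7146_format; its three nibbles
   land in the upper half of the clip_format word, i.e. bits 31:30 (only the
   low two bits of the high nibble fit), 27:24 and 19:16. */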
static void calculate_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync, u32* hps_ctrl)
{
	*hps_ctrl &= ~(MASK_30 | MASK_31 | MASK_28);
	*hps_ctrl |= (source << 30) | (sync << 28);
}

static void calculate_hxo_and_hyo(struct saa7146_vv *vv, u32* hps_h_scale, u32* hps_ctrl)
{
	int hyo = 0, hxo = 0;

	hyo = vv->standard->v_offset;
	hxo = vv->standard->h_offset;

	*hps_h_scale &= ~(MASK_B0 | 0xf00);
	*hps_h_scale |= (hxo << 0);

	*hps_ctrl &= ~(MASK_W0 | MASK_B2);
	*hps_ctrl |= (hyo << 12);
}
/* helper functions for the calculation of the horizontal and vertical
   scaling registers, clip-format register etc ...
   these functions take pointers to the (most likely read-out)
   original values and manipulate them according to the requested
   changes. */

/* hps_coeff used for CXY and CXUV; scale 1/1 -> scale 1/64 */
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_h_coeff_tab [] = {
	{0x00, 2}, {0x02, 4}, {0x00, 4}, {0x06, 8}, {0x02, 8},
	{0x08, 8}, {0x00, 8}, {0x1E, 16}, {0x0E, 8}, {0x26, 8},
	{0x06, 8}, {0x42, 8}, {0x02, 8}, {0x80, 8}, {0x00, 8},
	{0xFE, 16}, {0xFE, 8}, {0x7E, 8}, {0x7E, 8}, {0x3E, 8},
	{0x3E, 8}, {0x1E, 8}, {0x1E, 8}, {0x0E, 8}, {0x0E, 8},
	{0x06, 8}, {0x06, 8}, {0x02, 8}, {0x02, 8}, {0x00, 8},
	{0x00, 8}, {0xFE, 16}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8},
	{0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0x7E, 8},
	{0x7E, 8}, {0x3E, 8}, {0x3E, 8}, {0x1E, 8}, {0x1E, 8},
	{0x0E, 8}, {0x0E, 8}, {0x06, 8}, {0x06, 8}, {0x02, 8},
	{0x02, 8}, {0x00, 8}, {0x00, 8}, {0xFE, 16}
};

/* table of attenuation values for horizontal scaling */
static u8 h_attenuation[] = { 1, 2, 4, 8, 2, 4, 8, 16, 0 };
/* calculate horizontal scale registers */
static int calculate_h_scale_registers(struct saa7146_dev *dev,
	int in_x, int out_x, int flip_lr,
	u32* hps_ctrl, u32* hps_v_gain, u32* hps_h_prescale, u32* hps_h_scale)
{
	/* horizontal prescaler */
	u32 dcgx = 0, xpsc = 0, xacm = 0, cxy = 0, cxuv = 0;
	/* horizontal scaler */
	u32 xim = 0, xp = 0, xsci = 0;
	/* vertical scale & gain */
	u32 pfuv = 0;
	/* helper variables */
	u32 h_atten = 0, i = 0;

	if (0 == out_x) {
		return -EINVAL;
	}

	/* mask out vanity-bit */
	*hps_ctrl &= ~MASK_29;

	/* calculate prescale-(xpsc)-value:	[n   .. 1/2) : 1
						[1/2 .. 1/3) : 2
						[1/3 .. 1/4) : 3
						... */
	if (in_x > out_x) {
		xpsc = in_x / out_x;
	} else {
		/* zooming */
		xpsc = 1;
	}

	/* if flip_lr-bit is set, number of pixels after
	   horizontal prescaling must be < 384 */
	if (0 != flip_lr) {
		/* set vanity bit */
		*hps_ctrl |= MASK_29;
		while (in_x / xpsc >= 384)
			xpsc++;
	}
	/* if zooming is wanted, number of pixels after
	   horizontal prescaling must be < 768 */
	else {
		while (in_x / xpsc >= 768)
			xpsc++;
	}

	/* maximum prescale is 64 (p.69) */
	if (xpsc > 64)
		xpsc = 64;

	/* keep xacm clear */
	xacm = 0;

	/* set horizontal filter parameters (CXY = CXUV) */
	cxy = hps_h_coeff_tab[((xpsc - 1) < 63 ? (xpsc - 1) : 63)].hps_coeff;
	cxuv = cxy;

	/* calculate and set horizontal fine scale (xsci) */

	/* bypass the horizontal scaler ? */
	if ((in_x == out_x) && (1 == xpsc))
		xsci = 0x400;
	else
		xsci = ((1024 * in_x) / (out_x * xpsc)) + xpsc;
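	/* Illustrative example (numbers chosen for illustration, not taken
	   from the data sheet): for in_x = 720, out_x = 360 and no flipping,
	   xpsc = 720/360 = 2, cxy = hps_h_coeff_tab[1].hps_coeff = 0x02 and
	   xsci = (1024*720)/(360*2) + 2 = 1026, i.e. the fine scaler is not
	   bypassed. */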
	/* set start phase for horizontal fine scale (xp) to 0 */
	xp = 0;

	/* set xim, if we bypass the horizontal scaler */
	if (0x400 == xsci)
		xim = 1;
	else
		xim = 0;

	/* if the prescaler is bypassed, enable horizontal
	   accumulation mode (xacm) and clear dcgx */
	if (1 == xpsc) {
		xacm = 1;
		dcgx = 0;
	} else {
		xacm = 0;
		/* get best match in the table of attenuations
		   for horizontal scaling */
		h_atten = hps_h_coeff_tab[((xpsc - 1) < 63 ? (xpsc - 1) : 63)].weight_sum;

		for (i = 0; h_attenuation[i] != 0; i++) {
			if (h_attenuation[i] >= h_atten)
				break;
		}
		dcgx = i;
	}
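	/* Illustrative example, continued: with xpsc = 2 the weight sum is
	   hps_h_coeff_tab[1].weight_sum = 4; the first entry of
	   h_attenuation[] that is >= 4 sits at index 2, so dcgx = 2. */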
	/* the horizontal scaling increment controls the UV filter
	   to reduce the bandwidth to improve the display quality,
	   so set it ... */
	if (xsci == 0x400)
		pfuv = 0x00;
	else if (xsci < 0x600)
		pfuv = 0x01;
	else if (xsci < 0x680)
		pfuv = 0x11;
	else if (xsci < 0x700)
		pfuv = 0x22;
	else
		pfuv = 0x33;

	*hps_v_gain &= MASK_W0 | MASK_B2;
	*hps_v_gain |= (pfuv << 24);

	*hps_h_scale &= ~(MASK_W1 | 0xf000);
	*hps_h_scale |= (xim << 31) | (xp << 24) | (xsci << 12);

	*hps_h_prescale |= (dcgx << 27) | ((xpsc - 1) << 18) | (xacm << 17) | (cxy << 8) | (cxuv << 0);

	return 0;
}
static struct {
	u16 hps_coeff;
	u16 weight_sum;
} hps_v_coeff_tab [] = {
	{0x0100, 2}, {0x0102, 4}, {0x0300, 4}, {0x0106, 8}, {0x0502, 8},
	{0x0708, 8}, {0x0F00, 8}, {0x011E, 16}, {0x110E, 16}, {0x1926, 16},
	{0x3906, 16}, {0x3D42, 16}, {0x7D02, 16}, {0x7F80, 16}, {0xFF00, 16},
	{0x01FE, 32}, {0x01FE, 32}, {0x817E, 32}, {0x817E, 32}, {0xC13E, 32},
	{0xC13E, 32}, {0xE11E, 32}, {0xE11E, 32}, {0xF10E, 32}, {0xF10E, 32},
	{0xF906, 32}, {0xF906, 32}, {0xFD02, 32}, {0xFD02, 32}, {0xFF00, 32},
	{0xFF00, 32}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64},
	{0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x817E, 64},
	{0x817E, 64}, {0xC13E, 64}, {0xC13E, 64}, {0xE11E, 64}, {0xE11E, 64},
	{0xF10E, 64}, {0xF10E, 64}, {0xF906, 64}, {0xF906, 64}, {0xFD02, 64},
	{0xFD02, 64}, {0xFF00, 64}, {0xFF00, 64}, {0x01FE, 128}
};

/* table of attenuation values for vertical scaling */
static u16 v_attenuation[] = { 2, 4, 8, 16, 32, 64, 128, 256, 0 };

/* calculate vertical scale registers */
static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field field,
	int in_y, int out_y, u32* hps_v_scale, u32* hps_v_gain)
{
	int lpi = 0;

	/* vertical scaling */
	u32 yacm = 0, ysci = 0, yacl = 0, ypo = 0, ype = 0;
	/* vertical scale & gain */
	u32 dcgy = 0, cya_cyb = 0;
	/* helper variables */
	u32 v_atten = 0, i = 0;

	/* error, if vertical zooming */
	if (in_y < out_y) {
		return -EINVAL;
	}

	/* linear phase interpolation may be used
	   if scaling is between 1 and 1/2 (both fields used)
	   or scaling is between 1/2 and 1/4 (if only one field is used) */
	if (V4L2_FIELD_HAS_BOTH(field)) {
		if (2 * out_y >= in_y) {
			lpi = 1;
		}
	} else if (field == V4L2_FIELD_TOP
		|| field == V4L2_FIELD_ALTERNATE
		|| field == V4L2_FIELD_BOTTOM) {
		if (4 * out_y >= in_y) {
			lpi = 1;
		}
		out_y *= 2;
	}

	if (0 != lpi) {
		yacm = 0;
		yacl = 0;
		cya_cyb = 0x00ff;

		/* calculate scaling increment */
		if (in_y > out_y)
			ysci = ((1024 * in_y) / (out_y + 1)) - 1024;
		else
			ysci = 0;

		dcgy = 0;

		/* calculate ype and ypo */
		ype = ysci / 16;
		ypo = ype + (ysci / 64);
	} else {
		yacm = 1;

		/* calculate scaling increment */
		ysci = (((10 * 1024 * (in_y - out_y - 1)) / in_y) + 9) / 10;

		/* calculate ype and ypo */
		ypo = ype = ((ysci + 15) / 16);

		/* the sequence length interval (yacl) has to be set according
		   to the prescale value, e.g.	[n   .. 1/2) : 0
						[1/2 .. 1/3) : 1
						[1/3 .. 1/4) : 2
						... */
		if (ysci < 512) {
			yacl = 0;
		} else {
			yacl = (ysci / (1024 - ysci));
		}

		/* get filter coefficients for cya, cyb from table hps_v_coeff_tab */
		cya_cyb = hps_v_coeff_tab[(yacl < 63 ? yacl : 63)].hps_coeff;

		/* get best match in the table of attenuations for vertical scaling */
		v_atten = hps_v_coeff_tab[(yacl < 63 ? yacl : 63)].weight_sum;

		for (i = 0; v_attenuation[i] != 0; i++) {
			if (v_attenuation[i] >= v_atten)
				break;
		}
		dcgy = i;
	}
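	/* Illustrative example (numbers chosen for illustration, not taken
	   from the data sheet): scaling an interlaced 576-line input down to
	   out_y = 144 takes the non-LPI path (2*144 < 576):
	   ysci = ((10*1024*431)/576 + 9)/10 = 767, ype = ypo = (767+15)/16 = 48,
	   yacl = 767/(1024-767) = 2, cya_cyb = hps_v_coeff_tab[2].hps_coeff = 0x0300
	   and, with a weight sum of 4, dcgy = 1. */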
	/* ypo and ype swapped in spec ? */
	*hps_v_scale |= (yacm << 31) | (ysci << 21) | (yacl << 15) | (ypo << 8) | (ype << 1);

	*hps_v_gain &= ~(MASK_W0 | MASK_B2);
	*hps_v_gain |= (dcgy << 16) | (cya_cyb << 0);

	return 0;
}
/* simple bubble-sort algorithm with duplicate elimination */
static int sort_and_eliminate(u32* values, int* count)
{
	int low = 0, high = 0, top = 0;
	int cur = 0, next = 0;

	/* sanity checks */
	if ((0 > *count) || (NULL == values)) {
		return -EINVAL;
	}

	/* bubble sort the first @count items of the array @values */
	for (top = *count; top > 0; top--) {
		for (low = 0, high = 1; high < top; low++, high++) {
			if (values[low] > values[high])
				swap(values[low], values[high]);
		}
	}

	/* remove duplicate items */
	for (cur = 0, next = 1; next < *count; next++) {
		if (values[cur] != values[next])
			values[++cur] = values[next];
	}

	*count = cur + 1;

	return 0;
}
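/* Usage example (for illustration only): an input array of { 5, 3, 3, 1 }
   with *count == 4 is sorted to { 1, 3, 3, 5 } and then compacted in place
   to { 1, 3, 5 }, with *count set to 3. */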
static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct saa7146_fh *fh,
	struct saa7146_video_dma *vdma2, u32* clip_format, u32* arbtr_ctrl, enum v4l2_field field)
{
	struct saa7146_vv *vv = dev->vv_data;
	__le32 *clipping = vv->d_clipping.cpu_addr;

	int width = vv->ov.win.w.width;
	int height = vv->ov.win.w.height;
	int clipcount = vv->ov.nclips;

	u32 line_list[32];
	u32 pixel_list[32];
	int numdwords = 0;

	int i = 0, j = 0;
	int cnt_line = 0, cnt_pixel = 0;

	int x[32], y[32], w[32], h[32];

	/* clear out memory */
	memset(&line_list[0], 0x00, sizeof(u32) * 32);
	memset(&pixel_list[0], 0x00, sizeof(u32) * 32);
	memset(clipping, 0x00, SAA7146_CLIPPING_MEM);

	/* fill the line and pixel-lists */
	for (i = 0; i < clipcount; i++) {
		int l = 0, r = 0, t = 0, b = 0;

		x[i] = vv->ov.clips[i].c.left;
		y[i] = vv->ov.clips[i].c.top;
		w[i] = vv->ov.clips[i].c.width;
		h[i] = vv->ov.clips[i].c.height;

		if (w[i] < 0) {
			x[i] += w[i]; w[i] = -w[i];
		}
		if (h[i] < 0) {
			y[i] += h[i]; h[i] = -h[i];
		}
		if (x[i] < 0) {
			w[i] += x[i]; x[i] = 0;
		}
		if (y[i] < 0) {
			h[i] += y[i]; y[i] = 0;
		}
		if (0 != vv->vflip) {
			y[i] = height - y[i] - h[i];
		}

		l = x[i];
		r = x[i] + w[i];
		t = y[i];
		b = y[i] + h[i];

		/* insert left/right coordinates */
		pixel_list[2 * i] = min_t(int, l, width);
		pixel_list[(2 * i) + 1] = min_t(int, r, width);
		/* insert top/bottom coordinates */
		line_list[2 * i] = min_t(int, t, height);
		line_list[(2 * i) + 1] = min_t(int, b, height);
	}

	/* sort and eliminate lists */
	cnt_line = cnt_pixel = 2 * clipcount;
	sort_and_eliminate(&pixel_list[0], &cnt_pixel);
	sort_and_eliminate(&line_list[0], &cnt_line);

	/* calculate the number of used u32s */
	numdwords = max_t(int, (cnt_line + 1), (cnt_pixel + 1)) * 2;
	numdwords = max_t(int, 4, numdwords);
	numdwords = min_t(int, 64, numdwords);

	/* fill up cliptable */
	for (i = 0; i < cnt_pixel; i++) {
		clipping[2 * i] |= cpu_to_le32(pixel_list[i] << 16);
	}
	for (i = 0; i < cnt_line; i++) {
		clipping[(2 * i) + 1] |= cpu_to_le32(line_list[i] << 16);
	}

	/* fill up cliptable with the display infos */
	for (j = 0; j < clipcount; j++) {
		for (i = 0; i < cnt_pixel; i++) {
			if (x[j] < 0)
				x[j] = 0;
			if (pixel_list[i] < (x[j] + w[j])) {
				if (pixel_list[i] >= x[j]) {
					clipping[2 * i] |= cpu_to_le32(1 << j);
				}
			}
		}
		for (i = 0; i < cnt_line; i++) {
			if (y[j] < 0)
				y[j] = 0;
			if (line_list[i] < (y[j] + h[j])) {
				if (line_list[i] >= y[j]) {
					clipping[(2 * i) + 1] |= cpu_to_le32(1 << j);
				}
			}
		}
	}
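	/* The resulting table thus interleaves even dwords (an X boundary in
	   bits 31:16 plus, in the low bits, a mask of the clip windows that
	   cover that boundary) with odd dwords carrying the same information
	   for the Y boundaries. */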
	/* adjust arbitration control register */
	*arbtr_ctrl &= 0xffff00ff;
	*arbtr_ctrl |= 0x00001c00;

	vdma2->base_even = vv->d_clipping.dma_handle;
	vdma2->base_odd = vv->d_clipping.dma_handle;
	vdma2->prot_addr = vv->d_clipping.dma_handle + ((sizeof(u32)) * (numdwords));
	vdma2->base_page = 0x04;
	vdma2->pitch = 0x00;
	vdma2->num_line_byte = (0 << 16 | (sizeof(u32)) * (numdwords - 1));

	/* set clipping-mode. this depends on the field(s) used */
	*clip_format &= 0xfffffff7;
	if (V4L2_FIELD_HAS_BOTH(field)) {
		*clip_format |= 0x00000008;
	} else {
		*clip_format |= 0x00000000;
	}
}
/* disable clipping */
static void saa7146_disable_clipping(struct saa7146_dev *dev)
{
	u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* mask out relevant bits (=lower word) */
	clip_format &= MASK_W1;

	/* upload clipping-registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	/* disable video dma2 */
	saa7146_write(dev, MC1, MASK_21);
}

static void saa7146_set_clipping_rect(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_vv *vv = dev->vv_data;
	enum v4l2_field field = vv->ov.win.field;
	struct saa7146_video_dma vdma2;
	u32 clip_format;
	u32 arbtr_ctrl;

	/* check clipcount, disable clipping if clipcount == 0 */
	if (vv->ov.nclips == 0) {
		saa7146_disable_clipping(dev);
		return;
	}

	clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);
	arbtr_ctrl = saa7146_read(dev, PCI_BT_V1);

	calculate_clipping_registers_rect(dev, fh, &vdma2, &clip_format, &arbtr_ctrl, field);

	/* set clipping format */
	clip_format &= 0xffff0008;
	clip_format |= (SAA7146_CLIPPING_RECT << 4);

	/* prepare video dma2 */
	saa7146_write(dev, BASE_EVEN2, vdma2.base_even);
	saa7146_write(dev, BASE_ODD2, vdma2.base_odd);
	saa7146_write(dev, PROT_ADDR2, vdma2.prot_addr);
	saa7146_write(dev, BASE_PAGE2, vdma2.base_page);
	saa7146_write(dev, PITCH2, vdma2.pitch);
	saa7146_write(dev, NUM_LINE_BYTE2, vdma2.num_line_byte);

	/* prepare the rest */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, PCI_BT_V1, arbtr_ctrl);

	/* upload clip_control-register, clipping-registers, enable video dma2 */
	saa7146_write(dev, MC2, (MASK_05 | MASK_21 | MASK_03 | MASK_19));
	saa7146_write(dev, MC1, (MASK_05 | MASK_21));
}
static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, enum v4l2_field field)
{
	struct saa7146_vv *vv = dev->vv_data;

	int source = vv->current_hps_source;
	int sync = vv->current_hps_sync;

	u32 hps_v_scale = 0, hps_v_gain = 0, hps_ctrl = 0, hps_h_prescale = 0, hps_h_scale = 0;

	/* set vertical scale */
	hps_v_scale = 0; /* all bits get set by the function-call */
	hps_v_gain = 0; /* fixme: saa7146_read(dev, HPS_V_GAIN); */
	calculate_v_scale_registers(dev, field, vv->standard->v_field * 2, height, &hps_v_scale, &hps_v_gain);

	/* set horizontal scale */
	hps_ctrl = 0;
	hps_h_prescale = 0; /* all bits get set in the function */
	hps_h_scale = 0;
	calculate_h_scale_registers(dev, vv->standard->h_pixels, width, vv->hflip, &hps_ctrl, &hps_v_gain, &hps_h_prescale, &hps_h_scale);

	/* set hyo and hxo */
	calculate_hxo_and_hyo(vv, &hps_h_scale, &hps_ctrl);
	calculate_hps_source_and_sync(dev, source, sync, &hps_ctrl);

	/* write out new register contents */
	saa7146_write(dev, HPS_V_SCALE, hps_v_scale);
	saa7146_write(dev, HPS_V_GAIN, hps_v_gain);
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, HPS_H_PRESCALE, hps_h_prescale);
	saa7146_write(dev, HPS_H_SCALE, hps_h_scale);

	/* upload shadow-ram registers */
	saa7146_write(dev, MC2, (MASK_05 | MASK_06 | MASK_21 | MASK_22));
}
/* calculate the new memory offsets for a desired position */
static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pixelformat);

	int b_depth = vv->ov_fmt->depth;
	int b_bpl = vv->ov_fb.fmt.bytesperline;
	/* The unsigned long cast is to remove a 64-bit compile warning since
	   it looks like a 64-bit address is cast to a 32-bit value, even
	   though the base pointer is really a 32-bit physical address that
	   goes into a 32-bit DMA register.
	   FIXME: might not work on some 64-bit platforms, but see the FIXME
	   in struct v4l2_framebuffer (videodev2.h) for that. */
	u32 base = (u32)(unsigned long)vv->ov_fb.base;

	struct saa7146_video_dma vdma1;

	/* calculate memory offsets for picture, look if we shall top-down-flip */
	vdma1.pitch = 2 * b_bpl;
	if (0 == vv->vflip) {
		vdma1.base_even = base + (w_y * (vdma1.pitch / 2)) + (w_x * (b_depth / 8));
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2);
		vdma1.prot_addr = vdma1.base_even + (w_height * (vdma1.pitch / 2));
	} else {
		vdma1.base_even = base + ((w_y + w_height) * (vdma1.pitch / 2)) + (w_x * (b_depth / 8));
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2);
		vdma1.prot_addr = vdma1.base_odd - (w_height * (vdma1.pitch / 2));
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if (0 != vv->vflip) {
		vdma1.pitch *= -1;
	}

	vdma1.base_page = sfmt->swap;
	vdma1.num_line_byte = (vv->standard->v_field << 16) + vv->standard->h_pixels;

	saa7146_write_out_dma(dev, 1, &vdma1);
}
static void saa7146_set_output_format(struct saa7146_dev *dev, unsigned long palette)
{
	u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL);

	/* call helper function */
	calculate_output_format_register(dev, palette, &clip_format);

	/* update the hps registers */
	saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));
}

/* select input-source */
void saa7146_set_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync)
{
	struct saa7146_vv *vv = dev->vv_data;
	u32 hps_ctrl = 0;

	/* read old state */
	hps_ctrl = saa7146_read(dev, HPS_CTRL);

	hps_ctrl &= ~(MASK_31 | MASK_30 | MASK_28);
	hps_ctrl |= (source << 30) | (sync << 28);

	/* write back & upload register */
	saa7146_write(dev, HPS_CTRL, hps_ctrl);
	saa7146_write(dev, MC2, (MASK_05 | MASK_21));

	vv->current_hps_source = source;
	vv->current_hps_sync = sync;
}
EXPORT_SYMBOL_GPL(saa7146_set_hps_source_and_sync);
int saa7146_enable_overlay(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;
	struct saa7146_vv *vv = dev->vv_data;

	saa7146_set_window(dev, vv->ov.win.w.width, vv->ov.win.w.height, vv->ov.win.field);
	saa7146_set_position(dev, vv->ov.win.w.left, vv->ov.win.w.top, vv->ov.win.w.height, vv->ov.win.field, vv->ov_fmt->pixelformat);
	saa7146_set_output_format(dev, vv->ov_fmt->trans);
	saa7146_set_clipping_rect(fh);

	/* enable video dma1 */
	saa7146_write(dev, MC1, (MASK_06 | MASK_22));
	return 0;
}

void saa7146_disable_overlay(struct saa7146_fh *fh)
{
	struct saa7146_dev *dev = fh->dev;

	/* disable clipping + video dma1 */
	saa7146_disable_clipping(dev);
	saa7146_write(dev, MC1, MASK_22);
}
void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma)
{
	int where = 0;

	if (which < 1 || which > 3) {
		return;
	}

	/* calculate starting address */
	where = (which - 1) * 0x18;
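	/* Each video DMA channel owns a block of six consecutive registers
	   spaced 0x18 bytes apart, so which == 1 writes the block at offset
	   0x00, which == 2 at 0x18 and which == 3 at 0x30, in the order
	   base_odd, base_even, prot_addr, pitch, base_page, num_line_byte. */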
	saa7146_write(dev, where, vdma->base_odd);
	saa7146_write(dev, where + 0x04, vdma->base_even);
	saa7146_write(dev, where + 0x08, vdma->prot_addr);
	saa7146_write(dev, where + 0x0c, vdma->pitch);
	saa7146_write(dev, where + 0x10, vdma->base_page);
	saa7146_write(dev, where + 0x14, vdma->num_line_byte);

	/* upload */
	saa7146_write(dev, MC2, (MASK_02 << (which - 1)) | (MASK_18 << (which - 1)));
	/*
	printk("vdma%d.base_even: 0x%08x\n", which, vdma->base_even);
	printk("vdma%d.base_odd: 0x%08x\n", which, vdma->base_odd);
	printk("vdma%d.prot_addr: 0x%08x\n", which, vdma->prot_addr);
	printk("vdma%d.base_page: 0x%08x\n", which, vdma->base_page);
	printk("vdma%d.pitch: 0x%08x\n", which, vdma->pitch);
	printk("vdma%d.num_line_byte: 0x%08x\n", which, vdma->num_line_byte);
	*/
}
static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_video_dma vdma1;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, buf->fmt->pixelformat);

	int width = buf->fmt->width;
	int height = buf->fmt->height;
	int bytesperline = buf->fmt->bytesperline;
	enum v4l2_field field = buf->fmt->field;

	int depth = sfmt->depth;

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	if (bytesperline != 0) {
		vdma1.pitch = bytesperline * 2;
	} else {
		vdma1.pitch = (width * depth * 2) / 8;
	}
	vdma1.num_line_byte = ((vv->standard->v_field << 16) + vv->standard->h_pixels);
	vdma1.base_page = buf->pt[0].dma | ME1 | sfmt->swap;

	if (0 != vv->vflip) {
		vdma1.prot_addr = buf->pt[0].offset;
		vdma1.base_even = buf->pt[0].offset + (vdma1.pitch / 2) * height;
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2);
	} else {
		vdma1.base_even = buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2);
		vdma1.prot_addr = buf->pt[0].offset + (vdma1.pitch / 2) * height;
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		if (vv->last_field == V4L2_FIELD_TOP) {
			vdma1.base_odd = vdma1.prot_addr;
			vdma1.pitch /= 2;
		} else if (vv->last_field == V4L2_FIELD_BOTTOM) {
			vdma1.base_odd = vdma1.base_even;
			vdma1.base_even = vdma1.prot_addr;
			vdma1.pitch /= 2;
		}
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
	}

	if (0 != vv->vflip) {
		vdma1.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	return 0;
}
static int calc_planar_422(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	int height = buf->fmt->height;
	int width = buf->fmt->width;

	vdma2->pitch = width;
	vdma3->pitch = width;

	/* fixme: look at bytesperline! */

	if (0 != vv->vflip) {
		vdma2->prot_addr = buf->pt[1].offset;
		vdma2->base_even = ((vdma2->pitch / 2) * height) + buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even - (vdma2->pitch / 2);

		vdma3->prot_addr = buf->pt[2].offset;
		vdma3->base_even = ((vdma3->pitch / 2) * height) + buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even - (vdma3->pitch / 2);
	} else {
		vdma3->base_even = buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even + (vdma3->pitch / 2);
		vdma3->prot_addr = (vdma3->pitch / 2) * height + buf->pt[2].offset;

		vdma2->base_even = buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even + (vdma2->pitch / 2);
		vdma2->prot_addr = (vdma2->pitch / 2) * height + buf->pt[1].offset;
	}
	return 0;
}

static int calc_planar_420(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3)
{
	int height = buf->fmt->height;
	int width = buf->fmt->width;

	vdma2->pitch = width / 2;
	vdma3->pitch = width / 2;

	if (0 != vv->vflip) {
		vdma2->prot_addr = buf->pt[2].offset;
		vdma2->base_even = ((vdma2->pitch / 2) * height) + buf->pt[2].offset;
		vdma2->base_odd = vdma2->base_even - (vdma2->pitch / 2);

		vdma3->prot_addr = buf->pt[1].offset;
		vdma3->base_even = ((vdma3->pitch / 2) * height) + buf->pt[1].offset;
		vdma3->base_odd = vdma3->base_even - (vdma3->pitch / 2);
	} else {
		vdma3->base_even = buf->pt[2].offset;
		vdma3->base_odd = vdma3->base_even + (vdma3->pitch);
		vdma3->prot_addr = (vdma3->pitch / 2) * height + buf->pt[2].offset;

		vdma2->base_even = buf->pt[1].offset;
		vdma2->base_odd = vdma2->base_even + (vdma2->pitch);
		vdma2->prot_addr = (vdma2->pitch / 2) * height + buf->pt[1].offset;
	}
	return 0;
}
static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa7146_buf *buf)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_video_dma vdma1;
	struct saa7146_video_dma vdma2;
	struct saa7146_video_dma vdma3;
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, buf->fmt->pixelformat);

	int width = buf->fmt->width;
	int height = buf->fmt->height;
	enum v4l2_field field = buf->fmt->field;

	BUG_ON(0 == buf->pt[0].dma);
	BUG_ON(0 == buf->pt[1].dma);
	BUG_ON(0 == buf->pt[2].dma);

	DEB_CAP("[size=%dx%d,fields=%s]\n",
		width, height, v4l2_field_names[field]);

	/* fixme: look at bytesperline! */

	/* fixme: what happens for user space buffers here? The offsets are
	   most likely wrong, this version here only works for page-aligned
	   buffers, modifications to the pagetable-functions are necessary... */

	vdma1.pitch = width * 2;
	vdma1.num_line_byte = ((vv->standard->v_field << 16) + vv->standard->h_pixels);
	vdma1.base_page = buf->pt[0].dma | ME1;

	if (0 != vv->vflip) {
		vdma1.prot_addr = buf->pt[0].offset;
		vdma1.base_even = ((vdma1.pitch / 2) * height) + buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2);
	} else {
		vdma1.base_even = buf->pt[0].offset;
		vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2);
		vdma1.prot_addr = (vdma1.pitch / 2) * height + buf->pt[0].offset;
	}

	vdma2.num_line_byte = 0; /* unused */
	vdma2.base_page = buf->pt[1].dma | ME1;

	vdma3.num_line_byte = 0; /* unused */
	vdma3.base_page = buf->pt[2].dma | ME1;

	switch (sfmt->depth) {
	case 12: {
		calc_planar_420(vv, buf, &vdma2, &vdma3);
		break;
	}
	case 16: {
		calc_planar_422(vv, buf, &vdma2, &vdma3);
		break;
	}
	default: {
		return -1;
	}
	}

	if (V4L2_FIELD_HAS_BOTH(field)) {
	} else if (field == V4L2_FIELD_ALTERNATE) {
		/* fixme */
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_TOP) {
		vdma1.base_odd = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.prot_addr;
		vdma3.pitch /= 2;
	} else if (field == V4L2_FIELD_BOTTOM) {
		vdma1.base_odd = vdma1.base_even;
		vdma1.base_even = vdma1.prot_addr;
		vdma1.pitch /= 2;
		vdma2.base_odd = vdma2.base_even;
		vdma2.base_even = vdma2.prot_addr;
		vdma2.pitch /= 2;
		vdma3.base_odd = vdma3.base_even;
		vdma3.base_even = vdma3.prot_addr;
		vdma3.pitch /= 2;
	}

	if (0 != vv->vflip) {
		vdma1.pitch *= -1;
		vdma2.pitch *= -1;
		vdma3.pitch *= -1;
	}

	saa7146_write_out_dma(dev, 1, &vdma1);
	if ((sfmt->flags & FORMAT_BYTE_SWAP) != 0) {
		saa7146_write_out_dma(dev, 3, &vdma2);
		saa7146_write_out_dma(dev, 2, &vdma3);
	} else {
		saa7146_write_out_dma(dev, 2, &vdma2);
		saa7146_write_out_dma(dev, 3, &vdma3);
	}
	return 0;
}
static void program_capture_engine(struct saa7146_dev *dev, int planar)
{
	struct saa7146_vv *vv = dev->vv_data;
	int count = 0;

	unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B;
	unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B;

	/* wait for o_fid_a/b / e_fid_a/b toggle only if rps register 0 is not set */
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | o_wait);
	WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | e_wait);

	/* set rps register 0 */
	WRITE_RPS0(CMD_WR_REG | (1 << 8) | (MC2 / 4));
	WRITE_RPS0(MASK_27 | MASK_11);

	/* turn on video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
	WRITE_RPS0(MASK_06 | MASK_22);	/* => mask */
	WRITE_RPS0(MASK_06 | MASK_22);	/* => values */

	if (0 != planar) {
		/* turn on video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_05 | MASK_21);	/* => mask */
		WRITE_RPS0(MASK_05 | MASK_21);	/* => values */

		/* turn on video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_04 | MASK_20);	/* => mask */
		WRITE_RPS0(MASK_04 | MASK_20);	/* => values */
	}

	/* wait for o_fid_a/b / e_fid_a/b toggle */
	if (vv->last_field == V4L2_FIELD_INTERLACED) {
		WRITE_RPS0(CMD_PAUSE | o_wait);
		WRITE_RPS0(CMD_PAUSE | e_wait);
	} else if (vv->last_field == V4L2_FIELD_TOP) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | o_wait);
	} else if (vv->last_field == V4L2_FIELD_BOTTOM) {
		WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | e_wait);
	}

	/* turn off video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
	WRITE_RPS0(MASK_22 | MASK_06);	/* => mask */
	WRITE_RPS0(MASK_22);		/* => values */

	if (0 != planar) {
		/* turn off video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_05 | MASK_21);	/* => mask */
		WRITE_RPS0(MASK_21);		/* => values */

		/* turn off video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1 / 4));
		WRITE_RPS0(MASK_04 | MASK_20);	/* => mask */
		WRITE_RPS0(MASK_20);		/* => values */
	}

	/* generate interrupt */
	WRITE_RPS0(CMD_INTERRUPT);

	/* stop */
	WRITE_RPS0(CMD_STOP);
}
void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
{
	struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, buf->fmt->pixelformat);
	struct saa7146_vv *vv = dev->vv_data;
	u32 vdma1_prot_addr;

	DEB_CAP("buf:%p, next:%p\n", buf, next);

	vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1);
	if (0 == vdma1_prot_addr) {
		/* clear out beginning of streaming bit (rps register 0) */
		DEB_CAP("forcing sync to new frame\n");
		saa7146_write(dev, MC2, MASK_27);
	}

	saa7146_set_window(dev, buf->fmt->width, buf->fmt->height, buf->fmt->field);
	saa7146_set_output_format(dev, sfmt->trans);
	saa7146_disable_clipping(dev);

	if (vv->last_field == V4L2_FIELD_INTERLACED) {
	} else if (vv->last_field == V4L2_FIELD_TOP) {
		vv->last_field = V4L2_FIELD_BOTTOM;
	} else if (vv->last_field == V4L2_FIELD_BOTTOM) {
		vv->last_field = V4L2_FIELD_TOP;
	}

	if (0 != IS_PLANAR(sfmt->trans)) {
		calculate_video_dma_grab_planar(dev, buf);
		program_capture_engine(dev, 1);
	} else {
		calculate_video_dma_grab_packed(dev, buf);
		program_capture_engine(dev, 0);
	}

	/*
	printk("vdma%d.base_even: 0x%08x\n", 1, saa7146_read(dev, BASE_EVEN1));
	printk("vdma%d.base_odd: 0x%08x\n", 1, saa7146_read(dev, BASE_ODD1));
	printk("vdma%d.prot_addr: 0x%08x\n", 1, saa7146_read(dev, PROT_ADDR1));
	printk("vdma%d.base_page: 0x%08x\n", 1, saa7146_read(dev, BASE_PAGE1));
	printk("vdma%d.pitch: 0x%08x\n", 1, saa7146_read(dev, PITCH1));
	printk("vdma%d.num_line_byte: 0x%08x\n", 1, saa7146_read(dev, NUM_LINE_BYTE1));
	printk("vdma%d => vptr : 0x%08x\n", 1, saa7146_read(dev, PCI_VDP1));
	*/

	/* write the address of the rps-program */
	saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle);

	/* turn on rps */
	saa7146_write(dev, MC1, (MASK_12 | MASK_28));
}