/*
 * cx88-vbi.c - VBI capture support for the Conexant CX2388x (cx8800) driver.
 */
  3. #include <linux/kernel.h>
  4. #include <linux/module.h>
  5. #include <linux/init.h>
  6. #include "cx88.h"
/* module option: debug verbosity for the VBI code path (0 = quiet) */
static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");

/*
 * Print only when vbi_debug >= level; messages are prefixed with the
 * core's name.  NOTE: expects a local 'dev' (struct cx8800_dev *) in
 * scope at every call site.
 */
#define dprintk(level,fmt, arg...) if (vbi_debug >= level) \
	printk(KERN_DEBUG "%s: " fmt, dev->core->name , ## arg)
  12. /* ------------------------------------------------------------------ */
  13. int cx8800_vbi_fmt (struct file *file, void *priv,
  14. struct v4l2_format *f)
  15. {
  16. struct cx8800_dev *dev = video_drvdata(file);
  17. f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
  18. f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
  19. f->fmt.vbi.offset = 244;
  20. if (dev->core->tvnorm & V4L2_STD_525_60) {
  21. /* ntsc */
  22. f->fmt.vbi.sampling_rate = 28636363;
  23. f->fmt.vbi.start[0] = 10;
  24. f->fmt.vbi.start[1] = 273;
  25. f->fmt.vbi.count[0] = VBI_LINE_NTSC_COUNT;
  26. f->fmt.vbi.count[1] = VBI_LINE_NTSC_COUNT;
  27. } else if (dev->core->tvnorm & V4L2_STD_625_50) {
  28. /* pal */
  29. f->fmt.vbi.sampling_rate = 35468950;
  30. f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
  31. f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
  32. f->fmt.vbi.count[0] = VBI_LINE_PAL_COUNT;
  33. f->fmt.vbi.count[1] = VBI_LINE_PAL_COUNT;
  34. }
  35. return 0;
  36. }
/*
 * Program the chip for VBI capture and start the RISC DMA engine at the
 * given buffer's RISC program.  Resets the grab counter and unmasks the
 * VBI interrupts.  Always returns 0.
 */
static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
				struct cx88_dmaqueue *q,
				struct cx88_buffer *buf)
{
	struct cx88_core *core = dev->core;

	/* setup fifo + format: point SRAM channel 24 at this buffer's
	 * RISC program, one line of VBI_LINE_LENGTH bytes per FIFO slot */
	cx88_sram_channel_setup(dev->core, &cx88_sram_channels[SRAM_CH24],
				VBI_LINE_LENGTH, buf->risc.dma);
	cx_write(MO_VBOS_CONTROL, ( (1 << 18) | /* comb filter delay fixup */
				    (1 << 15) | /* enable vbi capture */
				    (1 << 11) )); /* NOTE(review): bit 11 undocumented here — confirm vs datasheet */
	/* reset counter */
	cx_write(MO_VBI_GPCNTRL, GP_COUNT_CONTROL_RESET);
	q->count = 0;
	/* enable irqs */
	cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT);
	cx_set(MO_VID_INTMSK, 0x0f0088);
	/* enable capture */
	cx_set(VID_CAPTURE_CONTROL,0x18);
	/* start dma */
	cx_set(MO_DEV_CNTRL2, (1<<5));
	cx_set(MO_VID_DMACNTRL, 0x88);
	return 0;
}
/*
 * Undo cx8800_start_vbi_dma(): halt the DMA engine, disable VBI capture
 * and mask the VBI interrupts again (reverse order of the start path).
 */
void cx8800_stop_vbi_dma(struct cx8800_dev *dev)
{
	struct cx88_core *core = dev->core; /* used by the cx_clear() accessors */

	/* stop dma */
	cx_clear(MO_VID_DMACNTRL, 0x88);
	/* disable capture */
	cx_clear(VID_CAPTURE_CONTROL,0x18);
	/* disable irqs */
	cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
	cx_clear(MO_VID_INTMSK, 0x0f0088);
}
  72. int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
  73. struct cx88_dmaqueue *q)
  74. {
  75. struct cx88_buffer *buf;
  76. if (list_empty(&q->active))
  77. return 0;
  78. buf = list_entry(q->active.next, struct cx88_buffer, list);
  79. dprintk(2,"restart_queue [%p/%d]: restart dma\n",
  80. buf, buf->vb.vb2_buf.index);
  81. cx8800_start_vbi_dma(dev, q, buf);
  82. return 0;
  83. }
  84. /* ------------------------------------------------------------------ */
  85. static int queue_setup(struct vb2_queue *q, const void *parg,
  86. unsigned int *num_buffers, unsigned int *num_planes,
  87. unsigned int sizes[], void *alloc_ctxs[])
  88. {
  89. struct cx8800_dev *dev = q->drv_priv;
  90. *num_planes = 1;
  91. if (dev->core->tvnorm & V4L2_STD_525_60)
  92. sizes[0] = VBI_LINE_NTSC_COUNT * VBI_LINE_LENGTH * 2;
  93. else
  94. sizes[0] = VBI_LINE_PAL_COUNT * VBI_LINE_LENGTH * 2;
  95. alloc_ctxs[0] = dev->alloc_ctx;
  96. return 0;
  97. }
  98. static int buffer_prepare(struct vb2_buffer *vb)
  99. {
  100. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  101. struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
  102. struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
  103. struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
  104. unsigned int lines;
  105. unsigned int size;
  106. if (dev->core->tvnorm & V4L2_STD_525_60)
  107. lines = VBI_LINE_NTSC_COUNT;
  108. else
  109. lines = VBI_LINE_PAL_COUNT;
  110. size = lines * VBI_LINE_LENGTH * 2;
  111. if (vb2_plane_size(vb, 0) < size)
  112. return -EINVAL;
  113. vb2_set_plane_payload(vb, 0, size);
  114. cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
  115. 0, VBI_LINE_LENGTH * lines,
  116. VBI_LINE_LENGTH, 0,
  117. lines);
  118. return 0;
  119. }
  120. static void buffer_finish(struct vb2_buffer *vb)
  121. {
  122. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  123. struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
  124. struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
  125. struct cx88_riscmem *risc = &buf->risc;
  126. if (risc->cpu)
  127. pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
  128. memset(risc, 0, sizeof(*risc));
  129. }
  130. static void buffer_queue(struct vb2_buffer *vb)
  131. {
  132. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  133. struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
  134. struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
  135. struct cx88_buffer *prev;
  136. struct cx88_dmaqueue *q = &dev->vbiq;
  137. /* add jump to start */
  138. buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
  139. buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
  140. buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
  141. if (list_empty(&q->active)) {
  142. list_add_tail(&buf->list, &q->active);
  143. cx8800_start_vbi_dma(dev, q, buf);
  144. dprintk(2,"[%p/%d] vbi_queue - first active\n",
  145. buf, buf->vb.vb2_buf.index);
  146. } else {
  147. buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
  148. prev = list_entry(q->active.prev, struct cx88_buffer, list);
  149. list_add_tail(&buf->list, &q->active);
  150. prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
  151. dprintk(2,"[%p/%d] buffer_queue - append to active\n",
  152. buf, buf->vb.vb2_buf.index);
  153. }
  154. }
  155. static int start_streaming(struct vb2_queue *q, unsigned int count)
  156. {
  157. struct cx8800_dev *dev = q->drv_priv;
  158. struct cx88_dmaqueue *dmaq = &dev->vbiq;
  159. struct cx88_buffer *buf = list_entry(dmaq->active.next,
  160. struct cx88_buffer, list);
  161. cx8800_start_vbi_dma(dev, dmaq, buf);
  162. return 0;
  163. }
/*
 * vb2 .stop_streaming: halt capture/DMA, then hand every still-queued
 * buffer back to vb2 in the ERROR state (vb2 requires the driver to
 * own no buffers after this returns).
 */
static void stop_streaming(struct vb2_queue *q)
{
	struct cx8800_dev *dev = q->drv_priv;
	struct cx88_core *core = dev->core; /* used by the cx_clear() accessors */
	struct cx88_dmaqueue *dmaq = &dev->vbiq;
	unsigned long flags;

	/* NOTE(review): masks 0x11/0x06 differ from the 0x88/0x18 bits the
	 * start path sets — presumably the video (non-VBI) channel bits;
	 * confirm against the CX2388x register map */
	cx_clear(MO_VID_DMACNTRL, 0x11);
	cx_clear(VID_CAPTURE_CONTROL, 0x06);
	cx8800_stop_vbi_dma(dev);
	/* drain the active list under the IRQ-safe queue lock */
	spin_lock_irqsave(&dev->slock, flags);
	while (!list_empty(&dmaq->active)) {
		struct cx88_buffer *buf = list_entry(dmaq->active.next,
						     struct cx88_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
/* videobuf2 queue operations for the cx8800 VBI capture node */
const struct vb2_ops cx8800_vbi_qops = {
	.queue_setup = queue_setup,
	.buf_prepare = buffer_prepare,
	.buf_finish = buffer_finish,
	.buf_queue = buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,   /* stock unlock/relock helpers */
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};