  1. /*
  2. * Driver for the Conexant CX23885 PCIe bridge
  3. *
  4. * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. *
  15. * GNU General Public License for more details.
  16. */
  17. #include <linux/kernel.h>
  18. #include <linux/module.h>
  19. #include <linux/moduleparam.h>
  20. #include <linux/init.h>
  21. #include "cx23885.h"
/* Number of VBI buffers to request; the range 2-32 is enforced elsewhere. */
static unsigned int vbibufs = 4;
module_param(vbibufs, int, 0644);
MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");

/* Debug verbosity for this file; levels gate the dprintk() macro below. */
static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");

/* Level-gated debug printk; expects a 'dev' pointer in the calling scope. */
#define dprintk(level, fmt, arg...)\
	do { if (vbi_debug >= level)\
		printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
	} while (0)

/* ------------------------------------------------------------------ */

/* Bytes sampled per VBI line, and lines captured per field for each norm. */
#define VBI_LINE_LENGTH 1440
#define VBI_NTSC_LINE_COUNT 12
#define VBI_PAL_LINE_COUNT 18
  36. int cx23885_vbi_fmt(struct file *file, void *priv,
  37. struct v4l2_format *f)
  38. {
  39. struct cx23885_dev *dev = video_drvdata(file);
  40. f->fmt.vbi.sampling_rate = 27000000;
  41. f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
  42. f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
  43. f->fmt.vbi.offset = 0;
  44. f->fmt.vbi.flags = 0;
  45. if (dev->tvnorm & V4L2_STD_525_60) {
  46. /* ntsc */
  47. f->fmt.vbi.start[0] = V4L2_VBI_ITU_525_F1_START + 9;
  48. f->fmt.vbi.start[1] = V4L2_VBI_ITU_525_F2_START + 9;
  49. f->fmt.vbi.count[0] = VBI_NTSC_LINE_COUNT;
  50. f->fmt.vbi.count[1] = VBI_NTSC_LINE_COUNT;
  51. } else if (dev->tvnorm & V4L2_STD_625_50) {
  52. /* pal */
  53. f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
  54. f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
  55. f->fmt.vbi.count[0] = VBI_PAL_LINE_COUNT;
  56. f->fmt.vbi.count[1] = VBI_PAL_LINE_COUNT;
  57. }
  58. return 0;
  59. }
  60. /* We're given the Video Interrupt status register.
  61. * The cx23885_video_irq() func has already validated
  62. * the potential error bits, we just need to
  63. * deal with vbi payload and return indication if
  64. * we actually processed any payload.
  65. */
  66. int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status)
  67. {
  68. u32 count;
  69. int handled = 0;
  70. if (status & VID_BC_MSK_VBI_RISCI1) {
  71. dprintk(1, "%s() VID_BC_MSK_VBI_RISCI1\n", __func__);
  72. spin_lock(&dev->slock);
  73. count = cx_read(VID_A_GPCNT);
  74. cx23885_video_wakeup(dev, &dev->vbiq, count);
  75. spin_unlock(&dev->slock);
  76. handled++;
  77. }
  78. return handled;
  79. }
/*
 * Program the SRAM channel, reset the hardware counters and start VBI DMA
 * at @buf's RISC program.  The buffer must already be chained on @q.
 * NOTE(review): the register write order below looks intentional
 * (counters reset before IRQ unmask before DMA enable) - do not reorder.
 */
static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
	struct cx23885_dmaqueue *q,
	struct cx23885_buffer *buf)
{
	dprintk(1, "%s()\n", __func__);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02],
		VBI_LINE_LENGTH, buf->risc.dma);

	/* reset counter */
	cx_write(VID_A_GPCNT_CTL, 3);
	cx_write(VID_A_VBI_CTRL, 3);
	cx_write(VBI_A_GPCNT_CTL, 3);
	q->count = 0;

	/* enable irq */
	cx23885_irq_add_enable(dev, 0x01);
	cx_set(VID_A_INT_MSK, 0x000022);

	/* start dma */
	cx_set(DEV_CNTRL2, (1<<5));
	cx_set(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */

	return 0;
}
  101. /* ------------------------------------------------------------------ */
  102. static int queue_setup(struct vb2_queue *q, const void *parg,
  103. unsigned int *num_buffers, unsigned int *num_planes,
  104. unsigned int sizes[], void *alloc_ctxs[])
  105. {
  106. struct cx23885_dev *dev = q->drv_priv;
  107. unsigned lines = VBI_PAL_LINE_COUNT;
  108. if (dev->tvnorm & V4L2_STD_525_60)
  109. lines = VBI_NTSC_LINE_COUNT;
  110. *num_planes = 1;
  111. sizes[0] = lines * VBI_LINE_LENGTH * 2;
  112. alloc_ctxs[0] = dev->alloc_ctx;
  113. return 0;
  114. }
  115. static int buffer_prepare(struct vb2_buffer *vb)
  116. {
  117. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  118. struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
  119. struct cx23885_buffer *buf = container_of(vbuf,
  120. struct cx23885_buffer, vb);
  121. struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
  122. unsigned lines = VBI_PAL_LINE_COUNT;
  123. if (dev->tvnorm & V4L2_STD_525_60)
  124. lines = VBI_NTSC_LINE_COUNT;
  125. if (vb2_plane_size(vb, 0) < lines * VBI_LINE_LENGTH * 2)
  126. return -EINVAL;
  127. vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);
  128. cx23885_risc_vbibuffer(dev->pci, &buf->risc,
  129. sgt->sgl,
  130. 0, VBI_LINE_LENGTH * lines,
  131. VBI_LINE_LENGTH, 0,
  132. lines);
  133. return 0;
  134. }
  135. static void buffer_finish(struct vb2_buffer *vb)
  136. {
  137. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  138. struct cx23885_buffer *buf = container_of(vbuf,
  139. struct cx23885_buffer, vb);
  140. cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
  141. }
  142. /*
  143. * The risc program for each buffer works as follows: it starts with a simple
  144. * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
  145. * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
  146. * the initial JUMP).
  147. *
  148. * This is the risc program of the first buffer to be queued if the active list
  149. * is empty and it just keeps DMAing this buffer without generating any
  150. * interrupts.
  151. *
  152. * If a new buffer is added then the initial JUMP in the code for that buffer
  153. * will generate an interrupt which signals that the previous buffer has been
  154. * DMAed successfully and that it can be returned to userspace.
  155. *
  156. * It also sets the final jump of the previous buffer to the start of the new
  157. * buffer, thus chaining the new buffer into the DMA chain. This is a single
  158. * atomic u32 write, so there is no race condition.
  159. *
  160. * The end-result of all this that you only get an interrupt when a buffer
  161. * is ready, so the control flow is very easy.
  162. */
/*
 * vb2 buf_queue: chain this buffer's RISC program into the running DMA
 * chain (see the block comment above for the full scheme).
 */
static void buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);
	struct cx23885_buffer *prev;
	struct cx23885_dmaqueue *q = &dev->vbiq;
	unsigned long flags;

	/* Initial JUMP target: skip past the 12-byte JUMP itself (a NOP). */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	/* Final instruction: loop back to start + 12 until re-patched. */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	if (list_empty(&q->active)) {
		/* First buffer: it will DMA in a loop with no interrupts. */
		spin_lock_irqsave(&dev->slock, flags);
		list_add_tail(&buf->queue, &q->active);
		spin_unlock_irqrestore(&dev->slock, flags);
		dprintk(2, "[%p/%d] vbi_queue - first active\n",
			buf, buf->vb.vb2_buf.index);
	} else {
		/* Make this buffer's initial JUMP raise IRQ1, signalling
		 * that the previous buffer completed. */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(q->active.prev, struct cx23885_buffer,
			queue);
		spin_lock_irqsave(&dev->slock, flags);
		list_add_tail(&buf->queue, &q->active);
		spin_unlock_irqrestore(&dev->slock, flags);
		/* Chain in: re-point the previous buffer's final JUMP at
		 * this buffer's program - a single atomic u32 write, so no
		 * race with the running RISC engine. */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(2, "[%p/%d] buffer_queue - append to active\n",
			buf, buf->vb.vb2_buf.index);
	}
}
  194. static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
  195. {
  196. struct cx23885_dev *dev = q->drv_priv;
  197. struct cx23885_dmaqueue *dmaq = &dev->vbiq;
  198. struct cx23885_buffer *buf = list_entry(dmaq->active.next,
  199. struct cx23885_buffer, queue);
  200. cx23885_start_vbi_dma(dev, dmaq, buf);
  201. return 0;
  202. }
  203. static void cx23885_stop_streaming(struct vb2_queue *q)
  204. {
  205. struct cx23885_dev *dev = q->drv_priv;
  206. struct cx23885_dmaqueue *dmaq = &dev->vbiq;
  207. unsigned long flags;
  208. cx_clear(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */
  209. spin_lock_irqsave(&dev->slock, flags);
  210. while (!list_empty(&dmaq->active)) {
  211. struct cx23885_buffer *buf = list_entry(dmaq->active.next,
  212. struct cx23885_buffer, queue);
  213. list_del(&buf->queue);
  214. vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
  215. }
  216. spin_unlock_irqrestore(&dev->slock, flags);
  217. }
/* videobuf2 queue operations for the raw VBI capture node. */
struct vb2_ops cx23885_vbi_qops = {
	.queue_setup = queue_setup,
	.buf_prepare = buffer_prepare,
	.buf_finish = buffer_finish,
	.buf_queue = buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = cx23885_start_streaming,
	.stop_streaming = cx23885_stop_streaming,
};