/*
 * Author: Mikhail Ulyanov
 * Copyright (C) 2014-2015 Cogent Embedded, Inc. <source@cogentembedded.com>
 * Copyright (C) 2014-2015 Renesas Electronics Corporation
 *
 * This is based on the drivers/media/platform/s5p-jpeg driver by
 * Andrzej Pietrasiewicz and Jacek Anaszewski.
 * Some portions of code inspired by VSP1 driver by Laurent Pinchart.
 *
 * TODO in order of priority:
 * 1) Rotation
 * 2) Cropping
 * 3) V4L2_CID_JPEG_ACTIVE_MARKER
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unaligned.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#define DRV_NAME "rcar_jpu"

/*
 * Align JPEG header end to cache line to make sure we will not have any issues
 * with cache; additionally to requirement (33.3.27 R01UH0501EJ0100 Rev.1.00)
 */
#define JPU_JPEG_HDR_SIZE (ALIGN(0x258, L1_CACHE_BYTES))
#define JPU_JPEG_MAX_BYTES_PER_PIXEL 2 /* 16 bit precision format */
#define JPU_JPEG_MIN_SIZE 25 /* SOI + SOF + EOI */
#define JPU_JPEG_QTBL_SIZE 0x40
#define JPU_JPEG_HDCTBL_SIZE 0x1c
#define JPU_JPEG_HACTBL_SIZE 0xb2
#define JPU_JPEG_HEIGHT_OFFSET 0x91
#define JPU_JPEG_WIDTH_OFFSET 0x93
#define JPU_JPEG_SUBS_OFFSET 0x97
#define JPU_JPEG_QTBL_LUM_OFFSET 0x07
#define JPU_JPEG_QTBL_CHR_OFFSET 0x4c
#define JPU_JPEG_HDCTBL_LUM_OFFSET 0xa4
#define JPU_JPEG_HACTBL_LUM_OFFSET 0xc5
#define JPU_JPEG_HDCTBL_CHR_OFFSET 0x17c
#define JPU_JPEG_HACTBL_CHR_OFFSET 0x19d
#define JPU_JPEG_PADDING_OFFSET 0x24f
#define JPU_JPEG_LUM 0x00
#define JPU_JPEG_CHR 0x01
#define JPU_JPEG_DC 0x00
#define JPU_JPEG_AC 0x10
#define JPU_JPEG_422 0x21
#define JPU_JPEG_420 0x22
#define JPU_JPEG_DEFAULT_422_PIX_FMT V4L2_PIX_FMT_NV16M
#define JPU_JPEG_DEFAULT_420_PIX_FMT V4L2_PIX_FMT_NV12M

/* JPEG markers */
#define TEM 0x01
#define SOF0 0xc0
#define RST 0xd0
#define SOI 0xd8
#define EOI 0xd9
#define DHP 0xde
#define DHT 0xc4
#define COM 0xfe
#define DQT 0xdb
#define DRI 0xdd
#define APP0 0xe0

#define JPU_RESET_TIMEOUT 100 /* ms */
#define JPU_JOB_TIMEOUT 300 /* ms */
#define JPU_MAX_QUALITY 4
#define JPU_WIDTH_MIN 16
#define JPU_HEIGHT_MIN 16
#define JPU_WIDTH_MAX 4096
#define JPU_HEIGHT_MAX 4096
#define JPU_MEMALIGN 8

/* Flags that indicate a format can be used for capture/output */
#define JPU_FMT_TYPE_OUTPUT 0
#define JPU_FMT_TYPE_CAPTURE 1
#define JPU_ENC_CAPTURE (1 << 0)
#define JPU_ENC_OUTPUT (1 << 1)
#define JPU_DEC_CAPTURE (1 << 2)
#define JPU_DEC_OUTPUT (1 << 3)

/*
 * JPEG registers and bits
 */

/* JPEG code mode register */
#define JCMOD 0x00
#define JCMOD_PCTR (1 << 7)
#define JCMOD_MSKIP_ENABLE (1 << 5)
#define JCMOD_DSP_ENC (0 << 3)
#define JCMOD_DSP_DEC (1 << 3)
#define JCMOD_REDU (7 << 0)
#define JCMOD_REDU_422 (1 << 0)
#define JCMOD_REDU_420 (2 << 0)

/* JPEG code command register */
#define JCCMD 0x04
#define JCCMD_SRST (1 << 12)
#define JCCMD_JEND (1 << 2)
#define JCCMD_JSRT (1 << 0)

/* JPEG code quantization table number register */
#define JCQTN 0x0c
#define JCQTN_SHIFT(t) (((t) - 1) << 1)

/* JPEG code Huffman table number register */
#define JCHTN 0x10
#define JCHTN_AC_SHIFT(t) (((t) << 1) - 1)
#define JCHTN_DC_SHIFT(t) (((t) - 1) << 1)

#define JCVSZU 0x1c /* JPEG code vertical size upper register */
#define JCVSZD 0x20 /* JPEG code vertical size lower register */
#define JCHSZU 0x24 /* JPEG code horizontal size upper register */
#define JCHSZD 0x28 /* JPEG code horizontal size lower register */
#define JCSZ_MASK 0xff /* JPEG code h/v size register contains only 1 byte */

#define JCDTCU 0x2c /* JPEG code data count upper register */
#define JCDTCM 0x30 /* JPEG code data count middle register */
#define JCDTCD 0x34 /* JPEG code data count lower register */

/* JPEG interrupt enable register */
#define JINTE 0x38
#define JINTE_ERR (7 << 5) /* INT5 + INT6 + INT7 */
#define JINTE_TRANSF_COMPL (1 << 10)

/* JPEG interrupt status register */
#define JINTS 0x3c
#define JINTS_MASK 0x7c68
#define JINTS_ERR (1 << 5)
#define JINTS_PROCESS_COMPL (1 << 6)
#define JINTS_TRANSF_COMPL (1 << 10)

#define JCDERR 0x40 /* JPEG code decode error register */
#define JCDERR_MASK 0xf /* JPEG code decode error register mask */

/* JPEG interface encoding */
#define JIFECNT 0x70
#define JIFECNT_INFT_422 0
#define JIFECNT_INFT_420 1
#define JIFECNT_SWAP_WB (3 << 4) /* to JPU */

#define JIFESYA1 0x74 /* encode source Y address register 1 */
#define JIFESCA1 0x78 /* encode source C address register 1 */
#define JIFESYA2 0x7c /* encode source Y address register 2 */
#define JIFESCA2 0x80 /* encode source C address register 2 */
#define JIFESMW 0x84 /* encode source memory width register */
#define JIFESVSZ 0x88 /* encode source vertical size register */
#define JIFESHSZ 0x8c /* encode source horizontal size register */
#define JIFEDA1 0x90 /* encode destination address register 1 */
#define JIFEDA2 0x94 /* encode destination address register 2 */

/* JPEG decoding control register */
#define JIFDCNT 0xa0
#define JIFDCNT_SWAP_WB (3 << 1) /* from JPU */

#define JIFDSA1 0xa4 /* decode source address register 1 */
#define JIFDDMW 0xb0 /* decode destination memory width register */
#define JIFDDVSZ 0xb4 /* decode destination vert. size register */
#define JIFDDHSZ 0xb8 /* decode destination horiz. size register */
#define JIFDDYA1 0xbc /* decode destination Y address register 1 */
#define JIFDDCA1 0xc0 /* decode destination C address register 1 */

#define JCQTBL(n) (0x10000 + (n) * 0x40) /* quantization tables regs */
#define JCHTBD(n) (0x10100 + (n) * 0x100) /* Huffman table DC regs */
#define JCHTBA(n) (0x10120 + (n) * 0x100) /* Huffman table AC regs */

/**
 * struct jpu - JPEG IP abstraction
 * @mutex: the mutex protecting this structure
 * @lock: spinlock protecting the device contexts
 * @v4l2_dev: v4l2 device for mem2mem mode
 * @vfd_encoder: video device node for encoder mem2mem mode
 * @vfd_decoder: video device node for decoder mem2mem mode
 * @m2m_dev: v4l2 mem2mem device data
 * @curr: pointer to current context
 * @irq_queue: interrupt handler waitqueue
 * @regs: JPEG IP registers mapping
 * @irq: JPEG IP irq
 * @clk: JPEG IP clock
 * @dev: JPEG IP struct device
 * @alloc_ctx: videobuf2 memory allocator's context
 * @ref_count: reference counter
 */
struct jpu {
	struct mutex mutex;
	spinlock_t lock;
	struct v4l2_device v4l2_dev;
	struct video_device vfd_encoder;
	struct video_device vfd_decoder;
	struct v4l2_m2m_dev *m2m_dev;
	struct jpu_ctx *curr;
	wait_queue_head_t irq_queue;

	void __iomem *regs;
	unsigned int irq;
	struct clk *clk;
	struct device *dev;
	void *alloc_ctx;
	int ref_count;
};

/**
 * struct jpu_buffer - driver's specific video buffer
 * @buf: m2m buffer
 * @compr_quality: destination image quality in compression mode
 * @subsampling: source image subsampling in decompression mode
 */
struct jpu_buffer {
	struct v4l2_m2m_buffer buf;
	unsigned short compr_quality;
	unsigned char subsampling;
};

/**
 * struct jpu_fmt - driver's internal format data
 * @fourcc: the fourcc code, 0 if not applicable
 * @colorspace: the colorspace specifier
 * @bpp: number of bits per pixel per plane
 * @h_align: horizontal alignment order (align to 2^h_align)
 * @v_align: vertical alignment order (align to 2^v_align)
 * @subsampling: (horizontal:4 | vertical:4) subsampling factor
 * @num_planes: number of planes
 * @types: types of queue this format is applicable to
 */
struct jpu_fmt {
	u32 fourcc;
	u32 colorspace;
	u8 bpp[2];
	u8 h_align;
	u8 v_align;
	u8 subsampling;
	u8 num_planes;
	u16 types;
};

/**
 * jpu_q_data - parameters of one queue
 * @fmtinfo: driver-specific format of this queue
 * @format: multiplanar format of this queue
 * @sequence: sequence number
 */
struct jpu_q_data {
	struct jpu_fmt *fmtinfo;
	struct v4l2_pix_format_mplane format;
	unsigned int sequence;
};

/**
 * jpu_ctx - the device context data
 * @jpu: JPEG IP device for this context
 * @encoder: compression (encode) operation or decompression (decode)
 * @compr_quality: destination image quality in compression (encode) mode
 * @out_q: source (output) queue information
 * @cap_q: destination (capture) queue information
 * @fh: file handler
 * @ctrl_handler: controls handler
 */
struct jpu_ctx {
	struct jpu *jpu;
	bool encoder;
	unsigned short compr_quality;
	struct jpu_q_data out_q;
	struct jpu_q_data cap_q;
	struct v4l2_fh fh;
	struct v4l2_ctrl_handler ctrl_handler;
};

/**
 * jpeg_buffer - description of memory containing input JPEG data
 * @end: end position in the buffer
 * @curr: current position in the buffer
 */
struct jpeg_buffer {
	void *end;
	void *curr;
};

static struct jpu_fmt jpu_formats[] = {
	{ V4L2_PIX_FMT_JPEG, V4L2_COLORSPACE_JPEG,
	  {0, 0}, 0, 0, 0, 1, JPU_ENC_CAPTURE | JPU_DEC_OUTPUT },
	{ V4L2_PIX_FMT_NV16M, V4L2_COLORSPACE_SRGB,
	  {8, 8}, 2, 2, JPU_JPEG_422, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
	{ V4L2_PIX_FMT_NV12M, V4L2_COLORSPACE_SRGB,
	  {8, 4}, 2, 2, JPU_JPEG_420, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
	{ V4L2_PIX_FMT_NV16, V4L2_COLORSPACE_SRGB,
	  {16, 0}, 2, 2, JPU_JPEG_422, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
	{ V4L2_PIX_FMT_NV12, V4L2_COLORSPACE_SRGB,
	  {12, 0}, 2, 2, JPU_JPEG_420, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
};

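/*
 * Map of JPEG zigzag scan positions to byte offsets within the u32-packed
 * quantization tables below; the table effectively combines the zigzag
 * reordering with a per-32-bit-word byte reversal, matching the layout the
 * tables are written to the hardware in.  Used by put_qtbl() when building
 * the JPEG header.
 */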
static const u8 zigzag[] = {
	0x03, 0x02, 0x0b, 0x13, 0x0a, 0x01, 0x00, 0x09,
	0x12, 0x1b, 0x23, 0x1a, 0x11, 0x08, 0x07, 0x06,
	0x0f, 0x10, 0x19, 0x22, 0x2b, 0x33, 0x2a, 0x21,
	0x18, 0x17, 0x0e, 0x05, 0x04, 0x0d, 0x16, 0x1f,
	0x20, 0x29, 0x32, 0x3b, 0x3a, 0x31, 0x28, 0x27,
	0x1e, 0x15, 0x0c, 0x14, 0x1d, 0x26, 0x2f, 0x30,
	0x39, 0x38, 0x37, 0x2e, 0x25, 0x1c, 0x24, 0x2d,
	0x36, 0x3f, 0x3e, 0x35, 0x2c, 0x34, 0x3d, 0x3c
};

#define QTBL_SIZE (ALIGN(JPU_JPEG_QTBL_SIZE, \
			 sizeof(unsigned int)) / sizeof(unsigned int))
#define HDCTBL_SIZE (ALIGN(JPU_JPEG_HDCTBL_SIZE, \
			   sizeof(unsigned int)) / sizeof(unsigned int))
#define HACTBL_SIZE (ALIGN(JPU_JPEG_HACTBL_SIZE, \
			   sizeof(unsigned int)) / sizeof(unsigned int))

/*
 * Start of image; Quantization tables
 * SOF0 (17 bytes payload) is Baseline DCT - Sample precision, height, width,
 * Number of image components, (Ci:8 - Hi:4 - Vi:4 - Tq:8) * 3 - Y,Cb,Cr;
 * Huffman tables; Padding with 0xff (33.3.27 R01UH0501EJ0100 Rev.1.00)
 */
#define JPU_JPEG_HDR_BLOB { \
	0xff, SOI, 0xff, DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_LUM, \
	[JPU_JPEG_QTBL_LUM_OFFSET ... \
		JPU_JPEG_QTBL_LUM_OFFSET + JPU_JPEG_QTBL_SIZE - 1] = 0x00, \
	0xff, DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_CHR, \
	[JPU_JPEG_QTBL_CHR_OFFSET ... JPU_JPEG_QTBL_CHR_OFFSET + \
		JPU_JPEG_QTBL_SIZE - 1] = 0x00, 0xff, SOF0, 0x00, 0x11, 0x08, \
	[JPU_JPEG_HEIGHT_OFFSET ... JPU_JPEG_HEIGHT_OFFSET + 1] = 0x00, \
	[JPU_JPEG_WIDTH_OFFSET ... JPU_JPEG_WIDTH_OFFSET + 1] = 0x00, \
	0x03, 0x01, [JPU_JPEG_SUBS_OFFSET] = 0x00, JPU_JPEG_LUM, \
	0x02, 0x11, JPU_JPEG_CHR, 0x03, 0x11, JPU_JPEG_CHR, \
	0xff, DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, JPU_JPEG_LUM|JPU_JPEG_DC, \
	[JPU_JPEG_HDCTBL_LUM_OFFSET ... \
		JPU_JPEG_HDCTBL_LUM_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
	0xff, DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, JPU_JPEG_LUM|JPU_JPEG_AC, \
	[JPU_JPEG_HACTBL_LUM_OFFSET ... \
		JPU_JPEG_HACTBL_LUM_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
	0xff, DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, JPU_JPEG_CHR|JPU_JPEG_DC, \
	[JPU_JPEG_HDCTBL_CHR_OFFSET ... \
		JPU_JPEG_HDCTBL_CHR_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
	0xff, DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, JPU_JPEG_CHR|JPU_JPEG_AC, \
	[JPU_JPEG_HACTBL_CHR_OFFSET ... \
		JPU_JPEG_HACTBL_CHR_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
	[JPU_JPEG_PADDING_OFFSET ... JPU_JPEG_HDR_SIZE - 1] = 0xff \
}

static unsigned char jpeg_hdrs[JPU_MAX_QUALITY][JPU_JPEG_HDR_SIZE] = {
	[0 ... JPU_MAX_QUALITY - 1] = JPU_JPEG_HDR_BLOB
};

static const unsigned int qtbl_lum[JPU_MAX_QUALITY][QTBL_SIZE] = {
	{
		0x14101927, 0x322e3e44, 0x10121726, 0x26354144,
		0x19171f26, 0x35414444, 0x27262635, 0x41444444,
		0x32263541, 0x44444444, 0x2e354144, 0x44444444,
		0x3e414444, 0x44444444, 0x44444444, 0x44444444
	},
	{
		0x100b0b10, 0x171b1f1e, 0x0b0c0c0f, 0x1417171e,
		0x0b0c0d10, 0x171a232f, 0x100f1017, 0x1a252f40,
		0x1714171a, 0x27334040, 0x1b171a25, 0x33404040,
		0x1f17232f, 0x40404040, 0x1e1e2f40, 0x40404040
	},
	{
		0x0c08080c, 0x11151817, 0x0809090b, 0x0f131217,
		0x08090a0c, 0x13141b24, 0x0c0b0c15, 0x141c2435,
		0x110f1314, 0x1e27333b, 0x1513141c, 0x27333b3b,
		0x18121b24, 0x333b3b3b, 0x17172435, 0x3b3b3b3b
	},
	{
		0x08060608, 0x0c0e1011, 0x06060608, 0x0a0d0c0f,
		0x06060708, 0x0d0e1218, 0x0808080e, 0x0d131823,
		0x0c0a0d0d, 0x141a2227, 0x0e0d0e13, 0x1a222727,
		0x100c1318, 0x22272727, 0x110f1823, 0x27272727
	}
};

static const unsigned int qtbl_chr[JPU_MAX_QUALITY][QTBL_SIZE] = {
	{
		0x15192026, 0x36444444, 0x191c1826, 0x36444444,
		0x2018202b, 0x42444444, 0x26262b35, 0x44444444,
		0x36424444, 0x44444444, 0x44444444, 0x44444444,
		0x44444444, 0x44444444, 0x44444444, 0x44444444
	},
	{
		0x110f1115, 0x141a2630, 0x0f131211, 0x141a232b,
		0x11121416, 0x1a1e2e35, 0x1511161c, 0x1e273540,
		0x14141a1e, 0x27304040, 0x1a1a1e27, 0x303f4040,
		0x26232e35, 0x40404040, 0x302b3540, 0x40404040
	},
	{
		0x0d0b0d10, 0x14141d25, 0x0b0e0e0e, 0x10141a20,
		0x0d0e0f11, 0x14172328, 0x100e1115, 0x171e2832,
		0x14101417, 0x1e25323b, 0x1414171e, 0x25303b3b,
		0x1d1a2328, 0x323b3b3b, 0x25202832, 0x3b3b3b3b
	},
	{
		0x0908090b, 0x0e111318, 0x080a090b, 0x0e0d1116,
		0x09090d0e, 0x0d0f171a, 0x0b0b0e0e, 0x0f141a21,
		0x0e0e0d0f, 0x14182127, 0x110d0f14, 0x18202727,
		0x1311171a, 0x21272727, 0x18161a21, 0x27272727
	}
};

static const unsigned int hdctbl_lum[HDCTBL_SIZE] = {
	0x00010501, 0x01010101, 0x01000000, 0x00000000,
	0x00010203, 0x04050607, 0x08090a0b
};

static const unsigned int hdctbl_chr[HDCTBL_SIZE] = {
	0x00010501, 0x01010101, 0x01000000, 0x00000000,
	0x00010203, 0x04050607, 0x08090a0b
};

static const unsigned int hactbl_lum[HACTBL_SIZE] = {
	0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512,
	0x21314106, 0x13516107, 0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0,
	0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839,
	0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869,
	0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798,
	0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5,
	0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea,
	0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000
};

static const unsigned int hactbl_chr[HACTBL_SIZE] = {
	0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512,
	0x21314106, 0x13516107, 0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0,
	0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839,
	0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869,
	0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798,
	0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5,
	0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea,
	0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000
};

static const char *error_to_text[16] = {
	"Normal",
	"SOI not detected",
	"SOF1 to SOFF detected",
	"Subsampling not detected",
	"SOF accuracy error",
	"DQT accuracy error",
	"Component error 1",
	"Component error 2",
	"SOF0, DQT, and DHT not detected when SOS detected",
	"SOS not detected",
	"EOI not detected",
	"Restart interval data number error detected",
	"Image size error",
	"Last MCU data number error",
	"Block data number error",
	"Unknown"
};

static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_v4l2_buffer *vb)
{
	struct v4l2_m2m_buffer *b =
		container_of(vb, struct v4l2_m2m_buffer, vb);

	return container_of(b, struct jpu_buffer, buf);
}

static u32 jpu_read(struct jpu *jpu, unsigned int reg)
{
	return ioread32(jpu->regs + reg);
}

static void jpu_write(struct jpu *jpu, u32 val, unsigned int reg)
{
	iowrite32(val, jpu->regs + reg);
}

static struct jpu_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
{
	return container_of(c->handler, struct jpu_ctx, ctrl_handler);
}

static struct jpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
{
	return container_of(fh, struct jpu_ctx, fh);
}

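/* Write a coefficient table into consecutive 32-bit JPU table registers. */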
static void jpu_set_tbl(struct jpu *jpu, u32 reg, const unsigned int *tbl,
			unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		jpu_write(jpu, tbl[i], reg + (i << 2));
}

static void jpu_set_qtbl(struct jpu *jpu, unsigned short quality)
{
	jpu_set_tbl(jpu, JCQTBL(0), qtbl_lum[quality], QTBL_SIZE);
	jpu_set_tbl(jpu, JCQTBL(1), qtbl_chr[quality], QTBL_SIZE);
}

static void jpu_set_htbl(struct jpu *jpu)
{
	jpu_set_tbl(jpu, JCHTBD(0), hdctbl_lum, HDCTBL_SIZE);
	jpu_set_tbl(jpu, JCHTBA(0), hactbl_lum, HACTBL_SIZE);
	jpu_set_tbl(jpu, JCHTBD(1), hdctbl_chr, HDCTBL_SIZE);
	jpu_set_tbl(jpu, JCHTBA(1), hactbl_chr, HACTBL_SIZE);
}

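/* Poll until the software reset bit clears, giving up after JPU_RESET_TIMEOUT ms. */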
static int jpu_wait_reset(struct jpu *jpu)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(JPU_RESET_TIMEOUT);

	while (jpu_read(jpu, JCCMD) & JCCMD_SRST) {
		if (time_after(jiffies, timeout)) {
			dev_err(jpu->dev, "timed out in reset\n");
			return -ETIMEDOUT;
		}
		schedule();
	}

	return 0;
}

static int jpu_reset(struct jpu *jpu)
{
	jpu_write(jpu, JCCMD_SRST, JCCMD);
	return jpu_wait_reset(jpu);
}

/*
 * ============================================================================
 * video ioctl operations
 * ============================================================================
 */
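/* Copy a quantization table into the JPEG header in zigzag scan order. */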
static void put_qtbl(u8 *p, const u8 *qtbl)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(zigzag); i++)
		p[i] = *(qtbl + zigzag[i]);
}

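/*
 * Copy a Huffman table into the JPEG header, reversing the byte order of
 * every 32-bit word so the header bytes come out in natural order.
 */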
static void put_htbl(u8 *p, const u8 *htbl, unsigned int len)
{
	unsigned int i, j;

	for (i = 0; i < len; i += 4)
		for (j = 0; j < 4 && (i + j) < len; ++j)
			p[i + j] = htbl[i + 3 - j];
}

static void jpu_generate_hdr(unsigned short quality, unsigned char *p)
{
	put_qtbl(p + JPU_JPEG_QTBL_LUM_OFFSET, (const u8 *)qtbl_lum[quality]);
	put_qtbl(p + JPU_JPEG_QTBL_CHR_OFFSET, (const u8 *)qtbl_chr[quality]);

	put_htbl(p + JPU_JPEG_HDCTBL_LUM_OFFSET, (const u8 *)hdctbl_lum,
		 JPU_JPEG_HDCTBL_SIZE);
	put_htbl(p + JPU_JPEG_HACTBL_LUM_OFFSET, (const u8 *)hactbl_lum,
		 JPU_JPEG_HACTBL_SIZE);

	put_htbl(p + JPU_JPEG_HDCTBL_CHR_OFFSET, (const u8 *)hdctbl_chr,
		 JPU_JPEG_HDCTBL_SIZE);
	put_htbl(p + JPU_JPEG_HACTBL_CHR_OFFSET, (const u8 *)hactbl_chr,
		 JPU_JPEG_HACTBL_SIZE);
}

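/* Return the next byte of the JPEG stream, or -1 once the buffer is exhausted. */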
static int get_byte(struct jpeg_buffer *buf)
{
	if (buf->curr >= buf->end)
		return -1;

	return *(u8 *)buf->curr++;
}

static int get_word_be(struct jpeg_buffer *buf, unsigned int *word)
{
	if (buf->end - buf->curr < 2)
		return -1;

	*word = get_unaligned_be16(buf->curr);
	buf->curr += 2;

	return 0;
}

static void skip(struct jpeg_buffer *buf, unsigned long len)
{
	buf->curr += min((unsigned long)(buf->end - buf->curr), len);
}

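/*
 * Walk the JPEG marker segments: extract width and height from the SOF0
 * segment and return the luma component's subsampling byte, i.e.
 * JPU_JPEG_422 (0x21) or JPU_JPEG_420 (0x22) for supported images, or 0 on
 * any parse error.
 */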
static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
			unsigned int *height)
{
	struct jpeg_buffer jpeg_buffer;
	unsigned int word;
	bool soi = false;

	jpeg_buffer.end = buffer + size;
	jpeg_buffer.curr = buffer;

	/*
	 * basic size check and EOI - we don't want to let JPU cross
	 * buffer bounds in any case. Hope it's stopping by EOI.
	 */
	if (size < JPU_JPEG_MIN_SIZE || *(u8 *)(buffer + size - 1) != EOI)
		return 0;

	for (;;) {
		int c;

		/* skip preceding filler bytes */
		do
			c = get_byte(&jpeg_buffer);
		while (c == 0xff || c == 0);

		if (!soi && c == SOI) {
			soi = true;
			continue;
		} else if (soi != (c != SOI))
			return 0;

		switch (c) {
		case SOF0: /* SOF0: baseline JPEG */
			skip(&jpeg_buffer, 3); /* segment length and bpp */
			if (get_word_be(&jpeg_buffer, height) ||
			    get_word_be(&jpeg_buffer, width) ||
			    get_byte(&jpeg_buffer) != 3) /* YCbCr only */
				return 0;

			skip(&jpeg_buffer, 1);
			return get_byte(&jpeg_buffer);
		case DHT:
		case DQT:
		case COM:
		case DRI:
		case APP0 ... APP0 + 0x0f:
			if (get_word_be(&jpeg_buffer, &word))
				return 0;
			skip(&jpeg_buffer, (long)word - 2);
			break;
		case 0:
			break;
		default:
			return 0;
		}
	}

	return 0;
}

static int jpu_querycap(struct file *file, void *priv,
			struct v4l2_capability *cap)
{
	struct jpu_ctx *ctx = fh_to_ctx(priv);

	if (ctx->encoder)
		strlcpy(cap->card, DRV_NAME " encoder", sizeof(cap->card));
	else
		strlcpy(cap->card, DRV_NAME " decoder", sizeof(cap->card));

	strlcpy(cap->driver, DRV_NAME, sizeof(cap->driver));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 dev_name(ctx->jpu->dev));
	cap->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
	cap->capabilities = V4L2_CAP_DEVICE_CAPS | cap->device_caps;
	memset(cap->reserved, 0, sizeof(cap->reserved));

	return 0;
}

static struct jpu_fmt *jpu_find_format(bool encoder, u32 pixelformat,
				       unsigned int fmt_type)
{
	unsigned int i, fmt_flag;

	if (encoder)
		fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ? JPU_ENC_OUTPUT :
							     JPU_ENC_CAPTURE;
	else
		fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ? JPU_DEC_OUTPUT :
							     JPU_DEC_CAPTURE;

	for (i = 0; i < ARRAY_SIZE(jpu_formats); i++) {
		struct jpu_fmt *fmt = &jpu_formats[i];

		if (fmt->fourcc == pixelformat && fmt->types & fmt_flag)
			return fmt;
	}

	return NULL;
}

static int jpu_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
	unsigned int i, num = 0;

	for (i = 0; i < ARRAY_SIZE(jpu_formats); ++i) {
		if (jpu_formats[i].types & type) {
			if (num == f->index)
				break;
			++num;
		}
	}

	if (i >= ARRAY_SIZE(jpu_formats))
		return -EINVAL;

	f->pixelformat = jpu_formats[i].fourcc;

	return 0;
}

static int jpu_enum_fmt_cap(struct file *file, void *priv,
			    struct v4l2_fmtdesc *f)
{
	struct jpu_ctx *ctx = fh_to_ctx(priv);

	return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_CAPTURE :
					      JPU_DEC_CAPTURE);
}

static int jpu_enum_fmt_out(struct file *file, void *priv,
			    struct v4l2_fmtdesc *f)
{
	struct jpu_ctx *ctx = fh_to_ctx(priv);

	return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_OUTPUT : JPU_DEC_OUTPUT);
}

static struct jpu_q_data *jpu_get_q_data(struct jpu_ctx *ctx,
					 enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &ctx->out_q;
	else
		return &ctx->cap_q;
}

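/*
 * Like v4l_bound_align_image(), but round a dimension back up by one
 * alignment step when alignment shrank it and there is still room below the
 * maximum, so the aligned image never gets smaller than what was requested.
 */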
static void jpu_bound_align_image(u32 *w, unsigned int w_min,
				  unsigned int w_max, unsigned int w_align,
				  u32 *h, unsigned int h_min,
				  unsigned int h_max, unsigned int h_align)
{
	unsigned int width, height, w_step, h_step;

	width = *w;
	height = *h;
	w_step = 1U << w_align;
	h_step = 1U << h_align;

	v4l_bound_align_image(w, w_min, w_max, w_align, h, h_min, h_max,
			      h_align, 3);

	if (*w < width && *w + w_step < w_max)
		*w += w_step;
	if (*h < height && *h + h_step < h_max)
		*h += h_step;
}

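/*
 * Validate and adjust a multiplanar format: fall back to a default pixel
 * format when the requested one is not supported on this queue, align width
 * and height, and compute bytesperline/sizeimage for every plane (a
 * worst-case compressed size is used for V4L2_PIX_FMT_JPEG).
 */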
static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
			 struct v4l2_pix_format_mplane *pix,
			 enum v4l2_buf_type type)
{
	struct jpu_fmt *fmt;
	unsigned int f_type, w, h;

	f_type = V4L2_TYPE_IS_OUTPUT(type) ? JPU_FMT_TYPE_OUTPUT :
					     JPU_FMT_TYPE_CAPTURE;

	fmt = jpu_find_format(ctx->encoder, pix->pixelformat, f_type);
	if (!fmt) {
		unsigned int pixelformat;

		dev_dbg(ctx->jpu->dev, "unknown format; set default format\n");
		if (ctx->encoder)
			pixelformat = f_type == JPU_FMT_TYPE_OUTPUT ?
				V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
		else
			pixelformat = f_type == JPU_FMT_TYPE_CAPTURE ?
				V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
		fmt = jpu_find_format(ctx->encoder, pixelformat, f_type);
	}

	pix->pixelformat = fmt->fourcc;
	pix->colorspace = fmt->colorspace;
	pix->field = V4L2_FIELD_NONE;
	pix->num_planes = fmt->num_planes;
	memset(pix->reserved, 0, sizeof(pix->reserved));

	jpu_bound_align_image(&pix->width, JPU_WIDTH_MIN, JPU_WIDTH_MAX,
			      fmt->h_align, &pix->height, JPU_HEIGHT_MIN,
			      JPU_HEIGHT_MAX, fmt->v_align);

	w = pix->width;
	h = pix->height;

	if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
		/* ignore userspace's sizeimage for encoding */
		if (pix->plane_fmt[0].sizeimage <= 0 || ctx->encoder)
			pix->plane_fmt[0].sizeimage = JPU_JPEG_HDR_SIZE +
				(JPU_JPEG_MAX_BYTES_PER_PIXEL * w * h);
		pix->plane_fmt[0].bytesperline = 0;
		memset(pix->plane_fmt[0].reserved, 0,
		       sizeof(pix->plane_fmt[0].reserved));
	} else {
		unsigned int i, bpl = 0;

		for (i = 0; i < pix->num_planes; ++i)
			bpl = max(bpl, pix->plane_fmt[i].bytesperline);

		bpl = clamp_t(unsigned int, bpl, w, JPU_WIDTH_MAX);
		bpl = round_up(bpl, JPU_MEMALIGN);

		for (i = 0; i < pix->num_planes; ++i) {
			pix->plane_fmt[i].bytesperline = bpl;
			pix->plane_fmt[i].sizeimage = bpl * h * fmt->bpp[i] / 8;
			memset(pix->plane_fmt[i].reserved, 0,
			       sizeof(pix->plane_fmt[i].reserved));
		}
	}

	if (fmtinfo)
		*fmtinfo = fmt;

	return 0;
}

static int jpu_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct jpu_ctx *ctx = fh_to_ctx(priv);

	if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
		return -EINVAL;

	return __jpu_try_fmt(ctx, NULL, &f->fmt.pix_mp, f->type);
}

static int jpu_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct vb2_queue *vq;
	struct jpu_ctx *ctx = fh_to_ctx(priv);
	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
	struct jpu_fmt *fmtinfo;
	struct jpu_q_data *q_data;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	if (vb2_is_busy(vq)) {
		v4l2_err(&ctx->jpu->v4l2_dev, "%s queue busy\n", __func__);
		return -EBUSY;
	}

	ret = __jpu_try_fmt(ctx, &fmtinfo, &f->fmt.pix_mp, f->type);
	if (ret < 0)
		return ret;

	q_data = jpu_get_q_data(ctx, f->type);

	q_data->format = f->fmt.pix_mp;
	q_data->fmtinfo = fmtinfo;

	return 0;
}

static int jpu_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct jpu_q_data *q_data;
	struct jpu_ctx *ctx = fh_to_ctx(priv);

	if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
		return -EINVAL;

	q_data = jpu_get_q_data(ctx, f->type);
	f->fmt.pix_mp = q_data->format;

	return 0;
}

/*
 * V4L2 controls
 */
static int jpu_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct jpu_ctx *ctx = ctrl_to_ctx(ctrl);
	unsigned long flags;

	spin_lock_irqsave(&ctx->jpu->lock, flags);
	if (ctrl->id == V4L2_CID_JPEG_COMPRESSION_QUALITY)
		ctx->compr_quality = ctrl->val;
	spin_unlock_irqrestore(&ctx->jpu->lock, flags);

	return 0;
}

static const struct v4l2_ctrl_ops jpu_ctrl_ops = {
	.s_ctrl = jpu_s_ctrl,
};

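/*
 * Before streaming starts, run the raw-image side of the context through
 * __jpu_try_fmt() with the other queue's width and height; if the adjusted
 * resolution no longer matches what userspace configured, the source and
 * destination formats cannot describe the same image and streaming is
 * refused.
 */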
static int jpu_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
{
	struct jpu_ctx *ctx = fh_to_ctx(priv);
	struct jpu_q_data *src_q_data, *dst_q_data, *orig, adj, *ref;
	enum v4l2_buf_type adj_type;

	src_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
	dst_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);

	if (ctx->encoder) {
		adj = *src_q_data;
		orig = src_q_data;
		ref = dst_q_data;
		adj_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	} else {
		adj = *dst_q_data;
		orig = dst_q_data;
		ref = src_q_data;
		adj_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	}

	adj.format.width = ref->format.width;
	adj.format.height = ref->format.height;

	__jpu_try_fmt(ctx, NULL, &adj.format, adj_type);

	if (adj.format.width != orig->format.width ||
	    adj.format.height != orig->format.height) {
		dev_err(ctx->jpu->dev, "src and dst formats do not match.\n");
		/* maybe we can return -EPIPE here? */
		return -EINVAL;
	}

	return v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, type);
}

static const struct v4l2_ioctl_ops jpu_ioctl_ops = {
	.vidioc_querycap = jpu_querycap,

	.vidioc_enum_fmt_vid_cap_mplane = jpu_enum_fmt_cap,
	.vidioc_enum_fmt_vid_out_mplane = jpu_enum_fmt_out,
	.vidioc_g_fmt_vid_cap_mplane = jpu_g_fmt,
	.vidioc_g_fmt_vid_out_mplane = jpu_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane = jpu_try_fmt,
	.vidioc_try_fmt_vid_out_mplane = jpu_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane = jpu_s_fmt,
	.vidioc_s_fmt_vid_out_mplane = jpu_s_fmt,

	.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
	.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
	.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
	.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon = jpu_streamon,
	.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,

	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe
};

static int jpu_controls_create(struct jpu_ctx *ctx)
{
	struct v4l2_ctrl *ctrl;
	int ret;

	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1);

	ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler, &jpu_ctrl_ops,
				 V4L2_CID_JPEG_COMPRESSION_QUALITY,
				 0, JPU_MAX_QUALITY - 1, 1, 0);

	if (ctx->ctrl_handler.error) {
		ret = ctx->ctrl_handler.error;
		goto error_free;
	}

	if (!ctx->encoder)
		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
			       V4L2_CTRL_FLAG_READ_ONLY;

	ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
	if (ret < 0)
		goto error_free;

	return 0;

error_free:
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	return ret;
}

/*
 * ============================================================================
 * Queue operations
 * ============================================================================
 */
static int jpu_queue_setup(struct vb2_queue *vq,
			   const void *parg,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], void *alloc_ctxs[])
{
	const struct v4l2_format *fmt = parg;
	struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
	struct jpu_q_data *q_data;
	unsigned int i;

	q_data = jpu_get_q_data(ctx, vq->type);

	*nplanes = q_data->format.num_planes;

	for (i = 0; i < *nplanes; i++) {
		unsigned int q_size = q_data->format.plane_fmt[i].sizeimage;
		unsigned int f_size = fmt ?
			fmt->fmt.pix_mp.plane_fmt[i].sizeimage : 0;

		if (fmt && f_size < q_size)
			return -EINVAL;

		sizes[i] = fmt ? f_size : q_size;
		alloc_ctxs[i] = ctx->jpu->alloc_ctx;
	}

	return 0;
}

static int jpu_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct jpu_q_data *q_data;
	unsigned int i;

	q_data = jpu_get_q_data(ctx, vb->vb2_queue->type);

	if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		if (vbuf->field == V4L2_FIELD_ANY)
			vbuf->field = V4L2_FIELD_NONE;
		if (vbuf->field != V4L2_FIELD_NONE) {
			dev_err(ctx->jpu->dev, "%s field isn't supported\n",
				__func__);
			return -EINVAL;
		}
	}

	for (i = 0; i < q_data->format.num_planes; i++) {
		unsigned long size = q_data->format.plane_fmt[i].sizeimage;

		if (vb2_plane_size(vb, i) < size) {
			dev_err(ctx->jpu->dev,
				"%s: data will not fit into plane (%lu < %lu)\n",
				__func__, vb2_plane_size(vb, i), size);
			return -EINVAL;
		}

		/* decoder capture queue */
		if (!ctx->encoder && !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type))
			vb2_set_plane_payload(vb, i, size);
	}

	return 0;
}

static void jpu_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	if (!ctx->encoder && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
		struct jpu_q_data *q_data, adjust;
		void *buffer = vb2_plane_vaddr(vb, 0);
		unsigned long buf_size = vb2_get_plane_payload(vb, 0);
		unsigned int width, height;

		u8 subsampling = jpu_parse_hdr(buffer, buf_size, &width,
					       &height);

		/* check if JPEG data basic parsing was successful */
		if (subsampling != JPU_JPEG_422 && subsampling != JPU_JPEG_420)
			goto format_error;

		q_data = &ctx->out_q;

		adjust = *q_data;
		adjust.format.width = width;
		adjust.format.height = height;

		__jpu_try_fmt(ctx, &adjust.fmtinfo, &adjust.format,
			      V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

		if (adjust.format.width != q_data->format.width ||
		    adjust.format.height != q_data->format.height)
			goto format_error;

		/*
		 * keep subsampling in buffer to check it
		 * for compatibility in device_run
		 */
		jpu_buf->subsampling = subsampling;
	}

	if (ctx->fh.m2m_ctx)
		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);

	return;

format_error:
	dev_err(ctx->jpu->dev, "incompatible or corrupted JPEG data\n");
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}

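/*
 * For encoder capture buffers that completed successfully, prepend the
 * pre-built JPEG header for the selected quality and patch in the actual
 * height, width and subsampling of the encoded image.
 */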
static void jpu_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
	struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct jpu_q_data *q_data = &ctx->out_q;
	enum v4l2_buf_type type = vb->vb2_queue->type;
	u8 *buffer;

	if (vb->state == VB2_BUF_STATE_DONE)
		vbuf->sequence = jpu_get_q_data(ctx, type)->sequence++;

	if (!ctx->encoder || vb->state != VB2_BUF_STATE_DONE ||
	    V4L2_TYPE_IS_OUTPUT(type))
		return;

	buffer = vb2_plane_vaddr(vb, 0);

	memcpy(buffer, jpeg_hdrs[jpu_buf->compr_quality], JPU_JPEG_HDR_SIZE);
	*(__be16 *)(buffer + JPU_JPEG_HEIGHT_OFFSET) =
		cpu_to_be16(q_data->format.height);
	*(__be16 *)(buffer + JPU_JPEG_WIDTH_OFFSET) =
		cpu_to_be16(q_data->format.width);
	*(buffer + JPU_JPEG_SUBS_OFFSET) = q_data->fmtinfo->subsampling;
}

static int jpu_start_streaming(struct vb2_queue *vq, unsigned count)
{
	struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
	struct jpu_q_data *q_data = jpu_get_q_data(ctx, vq->type);

	q_data->sequence = 0;
	return 0;
}

static void jpu_stop_streaming(struct vb2_queue *vq)
{
	struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
	struct vb2_v4l2_buffer *vb;
	unsigned long flags;

	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(vq->type))
			vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (vb == NULL)
			return;
		spin_lock_irqsave(&ctx->jpu->lock, flags);
		v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&ctx->jpu->lock, flags);
	}
}

static struct vb2_ops jpu_qops = {
	.queue_setup = jpu_queue_setup,
	.buf_prepare = jpu_buf_prepare,
	.buf_queue = jpu_buf_queue,
	.buf_finish = jpu_buf_finish,
	.start_streaming = jpu_start_streaming,
	.stop_streaming = jpu_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

static int jpu_queue_init(void *priv, struct vb2_queue *src_vq,
			  struct vb2_queue *dst_vq)
{
	struct jpu_ctx *ctx = priv;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->buf_struct_size = sizeof(struct jpu_buffer);
	src_vq->ops = &jpu_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->jpu->mutex;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->buf_struct_size = sizeof(struct jpu_buffer);
	dst_vq->ops = &jpu_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->jpu->mutex;

	return vb2_queue_init(dst_vq);
}

/*
 * ============================================================================
 * Device file operations
 * ============================================================================
 */
static int jpu_open(struct file *file)
{
	struct jpu *jpu = video_drvdata(file);
	struct video_device *vfd = video_devdata(file);
	struct jpu_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	v4l2_fh_init(&ctx->fh, vfd);
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	ctx->jpu = jpu;
	ctx->encoder = vfd == &jpu->vfd_encoder;

	__jpu_try_fmt(ctx, &ctx->out_q.fmtinfo, &ctx->out_q.format,
		      V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
	__jpu_try_fmt(ctx, &ctx->cap_q.fmtinfo, &ctx->cap_q.format,
		      V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpu->m2m_dev, ctx, jpu_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto v4l_prepare_rollback;
	}

	ret = jpu_controls_create(ctx);
	if (ret < 0)
		goto v4l_prepare_rollback;

	if (mutex_lock_interruptible(&jpu->mutex)) {
		ret = -ERESTARTSYS;
		goto v4l_prepare_rollback;
	}

	if (jpu->ref_count == 0) {
		ret = clk_prepare_enable(jpu->clk);
		if (ret < 0)
			goto device_prepare_rollback;
		/* ...issue software reset */
		ret = jpu_reset(jpu);
		if (ret)
			goto jpu_reset_rollback;
	}

	jpu->ref_count++;

	mutex_unlock(&jpu->mutex);
	return 0;

jpu_reset_rollback:
	clk_disable_unprepare(jpu->clk);
device_prepare_rollback:
	mutex_unlock(&jpu->mutex);
v4l_prepare_rollback:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	return ret;
}

static int jpu_release(struct file *file)
{
	struct jpu *jpu = video_drvdata(file);
	struct jpu_ctx *ctx = fh_to_ctx(file->private_data);

	mutex_lock(&jpu->mutex);
	if (--jpu->ref_count == 0)
		clk_disable_unprepare(jpu->clk);
	mutex_unlock(&jpu->mutex);

	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);

	return 0;
}

static const struct v4l2_file_operations jpu_fops = {
	.owner = THIS_MODULE,
	.open = jpu_open,
	.release = jpu_release,
	.unlocked_ioctl = video_ioctl2,
	.poll = v4l2_m2m_fop_poll,
	.mmap = v4l2_m2m_fop_mmap,
};

/*
 * ============================================================================
 * mem2mem callbacks
 * ============================================================================
 */
static void jpu_cleanup(struct jpu_ctx *ctx, bool reset)
{
	/* remove current buffers and finish job */
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	unsigned long flags;

	spin_lock_irqsave(&ctx->jpu->lock, flags);

	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
	v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);

	/* ...and give it a chance on next run */
	if (reset)
		jpu_write(ctx->jpu, JCCMD_SRST, JCCMD);

	spin_unlock_irqrestore(&ctx->jpu->lock, flags);

	v4l2_m2m_job_finish(ctx->jpu->m2m_dev, ctx->fh.m2m_ctx);
}

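/*
 * Program the JPU for a single encode or decode job and start it; completion
 * (or failure) is reported asynchronously by the interrupt handler.
 */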
static void jpu_device_run(void *priv)
{
	struct jpu_ctx *ctx = priv;
	struct jpu *jpu = ctx->jpu;
	struct jpu_buffer *jpu_buf;
	struct jpu_q_data *q_data;
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	unsigned int w, h, bpl;
	unsigned char num_planes, subsampling;
	unsigned long flags;

	/* ...wait until module reset completes; we have mutex locked here */
	if (jpu_wait_reset(jpu)) {
		jpu_cleanup(ctx, true);
		return;
	}

	spin_lock_irqsave(&ctx->jpu->lock, flags);

	jpu->curr = ctx;

	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	if (ctx->encoder) {
		jpu_buf = vb2_to_jpu_buffer(dst_buf);
		q_data = &ctx->out_q;
	} else {
		jpu_buf = vb2_to_jpu_buffer(src_buf);
		q_data = &ctx->cap_q;
	}

	w = q_data->format.width;
	h = q_data->format.height;
	bpl = q_data->format.plane_fmt[0].bytesperline;
	num_planes = q_data->fmtinfo->num_planes;
	subsampling = q_data->fmtinfo->subsampling;

	if (ctx->encoder) {
		unsigned long src_1_addr, src_2_addr, dst_addr;
		unsigned int redu, inft;

		dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
		src_1_addr =
			vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
		if (num_planes > 1)
			src_2_addr = vb2_dma_contig_plane_dma_addr(
					&src_buf->vb2_buf, 1);
		else
			src_2_addr = src_1_addr + w * h;

		jpu_buf->compr_quality = ctx->compr_quality;

		if (subsampling == JPU_JPEG_420) {
			redu = JCMOD_REDU_420;
			inft = JIFECNT_INFT_420;
		} else {
			redu = JCMOD_REDU_422;
			inft = JIFECNT_INFT_422;
		}

		/* only no marker mode works for encoding */
		jpu_write(jpu, JCMOD_DSP_ENC | JCMOD_PCTR | redu |
			  JCMOD_MSKIP_ENABLE, JCMOD);

		jpu_write(jpu, JIFECNT_SWAP_WB | inft, JIFECNT);
		jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT);
		jpu_write(jpu, JINTE_TRANSF_COMPL, JINTE);

		/* Y and C components source addresses */
		jpu_write(jpu, src_1_addr, JIFESYA1);
		jpu_write(jpu, src_2_addr, JIFESCA1);

		/* memory width */
		jpu_write(jpu, bpl, JIFESMW);

		jpu_write(jpu, (w >> 8) & JCSZ_MASK, JCHSZU);
		jpu_write(jpu, w & JCSZ_MASK, JCHSZD);

		jpu_write(jpu, (h >> 8) & JCSZ_MASK, JCVSZU);
		jpu_write(jpu, h & JCSZ_MASK, JCVSZD);

		jpu_write(jpu, w, JIFESHSZ);
		jpu_write(jpu, h, JIFESVSZ);

		jpu_write(jpu, dst_addr + JPU_JPEG_HDR_SIZE, JIFEDA1);

		jpu_write(jpu, 0 << JCQTN_SHIFT(1) | 1 << JCQTN_SHIFT(2) |
			  1 << JCQTN_SHIFT(3), JCQTN);

		jpu_write(jpu, 0 << JCHTN_AC_SHIFT(1) | 0 << JCHTN_DC_SHIFT(1) |
			  1 << JCHTN_AC_SHIFT(2) | 1 << JCHTN_DC_SHIFT(2) |
			  1 << JCHTN_AC_SHIFT(3) | 1 << JCHTN_DC_SHIFT(3),
			  JCHTN);

		jpu_set_qtbl(jpu, ctx->compr_quality);
		jpu_set_htbl(jpu);
	} else {
		unsigned long src_addr, dst_1_addr, dst_2_addr;

		if (jpu_buf->subsampling != subsampling) {
			dev_err(ctx->jpu->dev,
				"src and dst formats do not match.\n");
			spin_unlock_irqrestore(&ctx->jpu->lock, flags);
			jpu_cleanup(ctx, false);
			return;
		}

		src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
		dst_1_addr =
			vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
		if (q_data->fmtinfo->num_planes > 1)
			dst_2_addr = vb2_dma_contig_plane_dma_addr(
					&dst_buf->vb2_buf, 1);
		else
			dst_2_addr = dst_1_addr + w * h;

		/* ...set up decoder operation */
		jpu_write(jpu, JCMOD_DSP_DEC | JCMOD_PCTR, JCMOD);
		jpu_write(jpu, JIFECNT_SWAP_WB, JIFECNT);
		jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT);

		/* ...enable interrupts on transfer completion and d-g error */
		jpu_write(jpu, JINTE_TRANSF_COMPL | JINTE_ERR, JINTE);

		/* ...set source/destination addresses of encoded data */
		jpu_write(jpu, src_addr, JIFDSA1);
		jpu_write(jpu, dst_1_addr, JIFDDYA1);
		jpu_write(jpu, dst_2_addr, JIFDDCA1);

		jpu_write(jpu, bpl, JIFDDMW);
	}

	/* ...start encoder/decoder operation */
	jpu_write(jpu, JCCMD_JSRT, JCCMD);

	spin_unlock_irqrestore(&ctx->jpu->lock, flags);
}

static int jpu_job_ready(void *priv)
{
	return 1;
}

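/*
 * Wait for the hardware to signal completion of the current job; if it does
 * not within JPU_JOB_TIMEOUT, clean up and force a software reset.
 */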
  1250. static void jpu_job_abort(void *priv)
  1251. {
  1252. struct jpu_ctx *ctx = priv;
  1253. if (!wait_event_timeout(ctx->jpu->irq_queue, !ctx->jpu->curr,
  1254. msecs_to_jiffies(JPU_JOB_TIMEOUT)))
  1255. jpu_cleanup(ctx, true);
  1256. }
  1257. static struct v4l2_m2m_ops jpu_m2m_ops = {
  1258. .device_run = jpu_device_run,
  1259. .job_ready = jpu_job_ready,
  1260. .job_abort = jpu_job_abort,
  1261. };
  1262. /*
  1263. * ============================================================================
  1264. * IRQ handler
  1265. * ============================================================================
  1266. */
static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
{
	struct jpu *jpu = dev_id;
	struct jpu_ctx *curr_ctx;
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	unsigned int int_status;

	int_status = jpu_read(jpu, JINTS);

	/* ...spurious interrupt */
	if (!((JINTS_TRANSF_COMPL | JINTS_PROCESS_COMPL | JINTS_ERR) &
	      int_status))
		return IRQ_NONE;

	/* ...clear interrupts */
	jpu_write(jpu, ~(int_status & JINTS_MASK), JINTS);
	if (int_status & (JINTS_ERR | JINTS_PROCESS_COMPL))
		jpu_write(jpu, JCCMD_JEND, JCCMD);

	spin_lock(&jpu->lock);
	if ((int_status & JINTS_PROCESS_COMPL) &&
	    !(int_status & JINTS_TRANSF_COMPL))
		goto handled;

	curr_ctx = v4l2_m2m_get_curr_priv(jpu->m2m_dev);
	if (!curr_ctx) {
		/* ...instance is not running */
		dev_err(jpu->dev, "no active context for m2m\n");
		goto handled;
	}

	src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);

	if (int_status & JINTS_TRANSF_COMPL) {
		if (curr_ctx->encoder) {
			unsigned long payload_size = jpu_read(jpu, JCDTCU) << 16
						   | jpu_read(jpu, JCDTCM) << 8
						   | jpu_read(jpu, JCDTCD);
			vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
					      payload_size + JPU_JPEG_HDR_SIZE);
		}

		dst_buf->field = src_buf->field;
		dst_buf->timestamp = src_buf->timestamp;
		if (src_buf->flags & V4L2_BUF_FLAG_TIMECODE)
			dst_buf->timecode = src_buf->timecode;
		dst_buf->flags = src_buf->flags &
			(V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME |
			 V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME |
			 V4L2_BUF_FLAG_TSTAMP_SRC_MASK);

		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
	} else if (int_status & JINTS_ERR) {
		unsigned char error = jpu_read(jpu, JCDERR) & JCDERR_MASK;

		dev_dbg(jpu->dev, "processing error: %#X: %s\n", error,
			error_to_text[error]);

		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
	}

	jpu->curr = NULL;

	/* ...reset JPU after completion */
	jpu_write(jpu, JCCMD_SRST, JCCMD);
	spin_unlock(&jpu->lock);

	v4l2_m2m_job_finish(jpu->m2m_dev, curr_ctx->fh.m2m_ctx);

	/* ...wakeup abort routine if needed */
	wake_up(&jpu->irq_queue);

	return IRQ_HANDLED;

handled:
	spin_unlock(&jpu->lock);
	return IRQ_HANDLED;
}

/*
 * ============================================================================
 * Driver basic infrastructure
 * ============================================================================
 */
static const struct of_device_id jpu_dt_ids[] = {
	{ .compatible = "renesas,jpu-r8a7790" }, /* H2 */
	{ .compatible = "renesas,jpu-r8a7791" }, /* M2-W */
	{ .compatible = "renesas,jpu-r8a7792" }, /* V2H */
	{ .compatible = "renesas,jpu-r8a7793" }, /* M2-N */
	{ },
};
MODULE_DEVICE_TABLE(of, jpu_dt_ids);
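
/*
 * ...acquire device resources, register the V4L2 and mem2mem cores and create
 * one video node for encoding and one for decoding
 */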
static int jpu_probe(struct platform_device *pdev)
{
	struct jpu *jpu;
	struct resource *res;
	int ret;
	unsigned int i;

	jpu = devm_kzalloc(&pdev->dev, sizeof(*jpu), GFP_KERNEL);
	if (!jpu)
		return -ENOMEM;

	init_waitqueue_head(&jpu->irq_queue);
	mutex_init(&jpu->mutex);
	spin_lock_init(&jpu->lock);
	jpu->dev = &pdev->dev;

	/* memory-mapped registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	jpu->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(jpu->regs))
		return PTR_ERR(jpu->regs);

	/* interrupt service routine registration */
	jpu->irq = ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot find IRQ\n");
		return ret;
	}

	ret = devm_request_irq(&pdev->dev, jpu->irq, jpu_irq_handler, 0,
			       dev_name(&pdev->dev), jpu);
	if (ret) {
		dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpu->irq);
		return ret;
	}

	/* clocks */
	jpu->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(jpu->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		return PTR_ERR(jpu->clk);
	}

	/* v4l2 device */
	ret = v4l2_device_register(&pdev->dev, &jpu->v4l2_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
		return ret;
	}

	/* mem2mem device */
	jpu->m2m_dev = v4l2_m2m_init(&jpu_m2m_ops);
	if (IS_ERR(jpu->m2m_dev)) {
		v4l2_err(&jpu->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(jpu->m2m_dev);
		goto device_register_rollback;
	}

	jpu->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
	if (IS_ERR(jpu->alloc_ctx)) {
		v4l2_err(&jpu->v4l2_dev, "Failed to init memory allocator\n");
		ret = PTR_ERR(jpu->alloc_ctx);
		goto m2m_init_rollback;
	}

	/* fill in quantization and Huffman tables for encoder */
	for (i = 0; i < JPU_MAX_QUALITY; i++)
		jpu_generate_hdr(i, (unsigned char *)jpeg_hdrs[i]);

	strlcpy(jpu->vfd_encoder.name, DRV_NAME, sizeof(jpu->vfd_encoder.name));
	jpu->vfd_encoder.fops = &jpu_fops;
	jpu->vfd_encoder.ioctl_ops = &jpu_ioctl_ops;
	jpu->vfd_encoder.minor = -1;
	jpu->vfd_encoder.release = video_device_release_empty;
	jpu->vfd_encoder.lock = &jpu->mutex;
	jpu->vfd_encoder.v4l2_dev = &jpu->v4l2_dev;
	jpu->vfd_encoder.vfl_dir = VFL_DIR_M2M;

	ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_GRABBER, -1);
	if (ret) {
		v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
		goto vb2_allocator_rollback;
	}

	video_set_drvdata(&jpu->vfd_encoder, jpu);

	strlcpy(jpu->vfd_decoder.name, DRV_NAME, sizeof(jpu->vfd_decoder.name));
	jpu->vfd_decoder.fops = &jpu_fops;
	jpu->vfd_decoder.ioctl_ops = &jpu_ioctl_ops;
	jpu->vfd_decoder.minor = -1;
	jpu->vfd_decoder.release = video_device_release_empty;
	jpu->vfd_decoder.lock = &jpu->mutex;
	jpu->vfd_decoder.v4l2_dev = &jpu->v4l2_dev;
	jpu->vfd_decoder.vfl_dir = VFL_DIR_M2M;

	ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_GRABBER, -1);
	if (ret) {
		v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
		goto enc_vdev_register_rollback;
	}

	video_set_drvdata(&jpu->vfd_decoder, jpu);
	platform_set_drvdata(pdev, jpu);

	v4l2_info(&jpu->v4l2_dev, "encoder device registered as /dev/video%d\n",
		  jpu->vfd_encoder.num);
	v4l2_info(&jpu->v4l2_dev, "decoder device registered as /dev/video%d\n",
		  jpu->vfd_decoder.num);

	return 0;

enc_vdev_register_rollback:
	video_unregister_device(&jpu->vfd_encoder);

vb2_allocator_rollback:
	vb2_dma_contig_cleanup_ctx(jpu->alloc_ctx);

m2m_init_rollback:
	v4l2_m2m_release(jpu->m2m_dev);

device_register_rollback:
	v4l2_device_unregister(&jpu->v4l2_dev);

	return ret;
}

static int jpu_remove(struct platform_device *pdev)
{
	struct jpu *jpu = platform_get_drvdata(pdev);

	video_unregister_device(&jpu->vfd_decoder);
	video_unregister_device(&jpu->vfd_encoder);
	vb2_dma_contig_cleanup_ctx(jpu->alloc_ctx);
	v4l2_m2m_release(jpu->m2m_dev);
	v4l2_device_unregister(&jpu->v4l2_dev);

	return 0;
}
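
/*
 * ...system sleep only needs to gate the clock while the device is in use
 * (ref_count != 0); otherwise there is nothing to do
 */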
#ifdef CONFIG_PM_SLEEP
static int jpu_suspend(struct device *dev)
{
	struct jpu *jpu = dev_get_drvdata(dev);

	if (jpu->ref_count == 0)
		return 0;

	clk_disable_unprepare(jpu->clk);

	return 0;
}

static int jpu_resume(struct device *dev)
{
	struct jpu *jpu = dev_get_drvdata(dev);

	if (jpu->ref_count == 0)
		return 0;

	clk_prepare_enable(jpu->clk);

	return 0;
}
#endif

static const struct dev_pm_ops jpu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(jpu_suspend, jpu_resume)
};

static struct platform_driver jpu_driver = {
	.probe = jpu_probe,
	.remove = jpu_remove,
	.driver = {
		.of_match_table = jpu_dt_ids,
		.name = DRV_NAME,
		.pm = &jpu_pm_ops,
	},
};

module_platform_driver(jpu_driver);

MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Mikhail Ulianov <mikhail.ulyanov@cogentembedded.com>");
MODULE_DESCRIPTION("Renesas R-Car JPEG processing unit driver");
MODULE_LICENSE("GPL v2");