am437x-vpfe.c

/*
 * TI VPFE capture Driver
 *
 * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
 *
 * Benoit Parrot <bparrot@ti.com>
 * Lad, Prabhakar <prabhakar.csengg@gmail.com>
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/videodev2.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-of.h>

#include "am437x-vpfe.h"

#define VPFE_MODULE_NAME "vpfe"
#define VPFE_VERSION "0.1.0"

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level 0-8");

#define vpfe_dbg(level, dev, fmt, arg...)	\
		v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg)
#define vpfe_info(dev, fmt, arg...)	\
		v4l2_info(&dev->v4l2_dev, fmt, ##arg)
#define vpfe_err(dev, fmt, arg...)	\
		v4l2_err(&dev->v4l2_dev, fmt, ##arg)

/* standard information */
struct vpfe_standard {
	v4l2_std_id std_id;
	unsigned int width;
	unsigned int height;
	struct v4l2_fract pixelaspect;
	int frame_format;
};

static const struct vpfe_standard vpfe_standards[] = {
	{V4L2_STD_525_60, 720, 480, {11, 10}, 1},
	{V4L2_STD_625_50, 720, 576, {54, 59}, 1},
};

struct bus_format {
	unsigned int width;
	unsigned int bpp;
};

/*
 * struct vpfe_fmt - VPFE media bus format information
 * @name: V4L2 format description
 * @fourcc: V4L2 pixel format FCC identifier
 * @code: V4L2 media bus format code
 * @l: bus width and bytes per pixel (when stored in memory) for a 10-bit bus
 * @s: bus width and bytes per pixel (when stored in memory) for an 8-bit bus
 * @supported: Indicates format supported by subdev
 * @index: enumeration index reported through VIDIOC_ENUM_FMT
 */
struct vpfe_fmt {
	const char *name;
	u32 fourcc;
	u32 code;
	struct bus_format l;
	struct bus_format s;
	bool supported;
	u32 index;
};

static struct vpfe_fmt formats[] = {
	{
		.name = "YUV 4:2:2 packed, YCbYCr",
		.fourcc = V4L2_PIX_FMT_YUYV,
		.code = MEDIA_BUS_FMT_YUYV8_2X8,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	}, {
		.name = "YUV 4:2:2 packed, CbYCrY",
		.fourcc = V4L2_PIX_FMT_UYVY,
		.code = MEDIA_BUS_FMT_UYVY8_2X8,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	}, {
		.name = "YUV 4:2:2 packed, YCrYCb",
		.fourcc = V4L2_PIX_FMT_YVYU,
		.code = MEDIA_BUS_FMT_YVYU8_2X8,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	}, {
		.name = "YUV 4:2:2 packed, CrYCbY",
		.fourcc = V4L2_PIX_FMT_VYUY,
		.code = MEDIA_BUS_FMT_VYUY8_2X8,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	}, {
		.name = "RAW8 BGGR",
		.fourcc = V4L2_PIX_FMT_SBGGR8,
		.code = MEDIA_BUS_FMT_SBGGR8_1X8,
		.l.width = 10,
		.l.bpp = 2,
		.s.width = 8,
		.s.bpp = 1,
		.supported = false,
	}, {
		.name = "RAW8 GBRG",
		.fourcc = V4L2_PIX_FMT_SGBRG8,
		.code = MEDIA_BUS_FMT_SGBRG8_1X8,
		.l.width = 10,
		.l.bpp = 2,
		.s.width = 8,
		.s.bpp = 1,
		.supported = false,
	}, {
		.name = "RAW8 GRBG",
		.fourcc = V4L2_PIX_FMT_SGRBG8,
		.code = MEDIA_BUS_FMT_SGRBG8_1X8,
		.l.width = 10,
		.l.bpp = 2,
		.s.width = 8,
		.s.bpp = 1,
		.supported = false,
	}, {
		.name = "RAW8 RGGB",
		.fourcc = V4L2_PIX_FMT_SRGGB8,
		.code = MEDIA_BUS_FMT_SRGGB8_1X8,
		.l.width = 10,
		.l.bpp = 2,
		.s.width = 8,
		.s.bpp = 1,
		.supported = false,
	}, {
		.name = "RGB565 (LE)",
		.fourcc = V4L2_PIX_FMT_RGB565,
		.code = MEDIA_BUS_FMT_RGB565_2X8_LE,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	}, {
		.name = "RGB565 (BE)",
		.fourcc = V4L2_PIX_FMT_RGB565X,
		.code = MEDIA_BUS_FMT_RGB565_2X8_BE,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	},
};

static int
__vpfe_get_format(struct vpfe_device *vpfe,
		  struct v4l2_format *format, unsigned int *bpp);

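/* Look up the vpfe_fmt entry whose media bus code matches @code, or NULL. */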
static struct vpfe_fmt *find_format_by_code(unsigned int code)
{
	struct vpfe_fmt *fmt;
	unsigned int k;

	for (k = 0; k < ARRAY_SIZE(formats); k++) {
		fmt = &formats[k];
		if (fmt->code == code)
			return fmt;
	}

	return NULL;
}

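/* Look up the vpfe_fmt entry whose fourcc matches @pixelformat, or NULL. */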
static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
{
	struct vpfe_fmt *fmt;
	unsigned int k;

	for (k = 0; k < ARRAY_SIZE(formats); k++) {
		fmt = &formats[k];
		if (fmt->fourcc == pixelformat)
			return fmt;
	}

	return NULL;
}

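/*
 * Translate a media bus format into a V4L2 pix format and bytes per pixel,
 * choosing between the 10-bit and 8-bit bus layouts based on the current
 * subdevice bus width and deriving a 32-byte aligned line pitch.
 */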
static void
mbus_to_pix(struct vpfe_device *vpfe,
	    const struct v4l2_mbus_framefmt *mbus,
	    struct v4l2_pix_format *pix, unsigned int *bpp)
{
	struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
	unsigned int bus_width = sdinfo->vpfe_param.bus_width;
	struct vpfe_fmt *fmt;

	fmt = find_format_by_code(mbus->code);
	if (WARN_ON(fmt == NULL)) {
		pr_err("Invalid mbus code set\n");
		*bpp = 1;
		return;
	}

	memset(pix, 0, sizeof(*pix));
	v4l2_fill_pix_format(pix, mbus);
	pix->pixelformat = fmt->fourcc;
	*bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;

	/* pitch should be 32 bytes aligned */
	pix->bytesperline = ALIGN(pix->width * *bpp, 32);
	pix->sizeimage = pix->bytesperline * pix->height;
}

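/*
 * Translate a V4L2 pix format into a media bus format, falling back to the
 * first entry of the format table when the fourcc is unknown.
 */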
static void pix_to_mbus(struct vpfe_device *vpfe,
			struct v4l2_pix_format *pix_fmt,
			struct v4l2_mbus_framefmt *mbus_fmt)
{
	struct vpfe_fmt *fmt;

	fmt = find_format_by_pix(pix_fmt->pixelformat);
	if (!fmt) {
		/* default to first entry */
		vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
			 pix_fmt->pixelformat);
		fmt = &formats[0];
	}

	memset(mbus_fmt, 0, sizeof(*mbus_fmt));
	v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
}

/* Print Four-character-code (FOURCC) */
static char *print_fourcc(u32 fmt)
{
	static char code[5];

	code[0] = (unsigned char)(fmt & 0xff);
	code[1] = (unsigned char)((fmt >> 8) & 0xff);
	code[2] = (unsigned char)((fmt >> 16) & 0xff);
	code[3] = (unsigned char)((fmt >> 24) & 0xff);
	code[4] = '\0';

	return code;
}

static int
cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
{
	return lhs->type == rhs->type &&
		lhs->fmt.pix.width == rhs->fmt.pix.width &&
		lhs->fmt.pix.height == rhs->fmt.pix.height &&
		lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
		lhs->fmt.pix.field == rhs->fmt.pix.field &&
		lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
		lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
		lhs->fmt.pix.quantization == rhs->fmt.pix.quantization &&
		lhs->fmt.pix.xfer_func == rhs->fmt.pix.xfer_func;
}

static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
{
	return ioread32(ccdc->ccdc_cfg.base_addr + offset);
}

static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset)
{
	iowrite32(val, ccdc->ccdc_cfg.base_addr + offset);
}

static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
{
	return container_of(ccdc, struct vpfe_device, ccdc);
}

static inline
struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb)
{
	return container_of(vb, struct vpfe_cap_buffer, vb);
}

static inline void vpfe_pcr_enable(struct vpfe_ccdc *ccdc, int flag)
{
	vpfe_reg_write(ccdc, !!flag, VPFE_PCR);
}

static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag)
{
	unsigned int cfg;

	if (!flag) {
		cfg = vpfe_reg_read(ccdc, VPFE_CONFIG);
		cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT);
	} else {
		cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT;
	}

	vpfe_reg_write(ccdc, cfg, VPFE_CONFIG);
}

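/*
 * Program the CCDC capture window: horizontal start/width in pixel-clock
 * units, vertical start and line count (halved for interlaced capture), and
 * the VDINT0/VDINT1 interrupt line positions.
 */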
static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
			     struct v4l2_rect *image_win,
			     enum ccdc_frmfmt frm_fmt,
			     int bpp)
{
	int horz_start, horz_nr_pixels;
	int vert_start, vert_nr_lines;
	int val, mid_img;

	/*
	 * ppc - per pixel count. Indicates how many pixels per cell are
	 * output to SDRAM. For example, for YCbCr it is one Y and one C,
	 * so 2; for raw capture it is 1.
	 */
	horz_start = image_win->left * bpp;
	horz_nr_pixels = (image_win->width * bpp) - 1;
	vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) |
				horz_nr_pixels, VPFE_HORZ_INFO);
	vert_start = image_win->top;

	if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
		vert_nr_lines = (image_win->height >> 1) - 1;
		vert_start >>= 1;
		/* Since first line doesn't have any data */
		vert_start += 1;
		/* configure VDINT0 */
		val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
	} else {
		/* Since first line doesn't have any data */
		vert_start += 1;
		vert_nr_lines = image_win->height - 1;
		/*
		 * configure VDINT0 and VDINT1. VDINT1 will be at half
		 * of image height
		 */
		mid_img = vert_start + (image_win->height / 2);
		val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) |
			(mid_img & VPFE_VDINT_VDINT1_MASK);
	}

	vpfe_reg_write(ccdc, val, VPFE_VDINT);
	vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) |
				vert_start, VPFE_VERT_START);
	vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES);
}

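/* Dump the main CCDC configuration registers at debug level 3. */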
static void vpfe_reg_dump(struct vpfe_ccdc *ccdc)
{
	struct vpfe_device *vpfe = to_vpfe(ccdc);

	vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW));
	vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP));
	vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB));
	vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP));
	vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN));
	vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST));
	vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n",
		 vpfe_reg_read(ccdc, VPFE_SYNMODE));
	vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n",
		 vpfe_reg_read(ccdc, VPFE_HSIZE_OFF));
	vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n",
		 vpfe_reg_read(ccdc, VPFE_HORZ_INFO));
	vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n",
		 vpfe_reg_read(ccdc, VPFE_VERT_START));
	vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n",
		 vpfe_reg_read(ccdc, VPFE_VERT_LINES));
}

static int
vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
			 struct vpfe_ccdc_config_params_raw *ccdcparam)
{
	struct vpfe_device *vpfe = to_vpfe(ccdc);
	u8 max_gamma, max_data;

	if (!ccdcparam->alaw.enable)
		return 0;

	max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
	max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);

	if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
	    ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
	    max_gamma > max_data) {
		vpfe_dbg(1, vpfe, "Invalid data line select\n");
		return -EINVAL;
	}

	return 0;
}

static void
vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc,
			    struct vpfe_ccdc_config_params_raw *raw_params)
{
	struct vpfe_ccdc_config_params_raw *config_params =
				&ccdc->ccdc_cfg.bayer.config_params;

	*config_params = *raw_params;
}

/*
 * vpfe_ccdc_restore_defaults()
 * This function will write defaults to all CCDC registers
 */
static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
{
	int i;

	/* Disable CCDC */
	vpfe_pcr_enable(ccdc, 0);

	/* set all registers to default value */
	for (i = 4; i <= 0x94; i += 4)
		vpfe_reg_write(ccdc, 0, i);

	vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING);
	vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW);
}

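/*
 * Wait for the CCDC to finish the current frame, reset all registers to
 * their power-on defaults, disable the module at the CONFIG level and drop
 * the runtime PM reference.
 */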
static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
{
	int dma_cntl, i, pcr;

	/* If the CCDC module is still busy wait for it to be done */
	for (i = 0; i < 10; i++) {
		usleep_range(5000, 6000);
		pcr = vpfe_reg_read(ccdc, VPFE_PCR);
		if (!pcr)
			break;

		/* make sure it is disabled */
		vpfe_pcr_enable(ccdc, 0);
	}

	/* Disable CCDC by resetting all registers to default POR values */
	vpfe_ccdc_restore_defaults(ccdc);

	/*
	 * If the DMA_CNTL overflow bit is set, clear it. It appears to take
	 * a while (~20ms) for this to become quiescent.
	 */
	for (i = 0; i < 10; i++) {
		dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
		if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
			break;

		/* Clear the overflow bit */
		vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
		usleep_range(5000, 6000);
	}

	/* Disable the module at the CONFIG level */
	vpfe_config_enable(ccdc, 0);

	pm_runtime_put_sync(dev);

	return 0;
}

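/*
 * Copy raw-bayer configuration parameters from user space and, if they pass
 * validation, store them as the current bayer configuration.
 */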
static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
{
	struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
	struct vpfe_ccdc_config_params_raw raw_params;
	int x;

	if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER)
		return -EINVAL;

	x = copy_from_user(&raw_params, params, sizeof(raw_params));
	if (x) {
		vpfe_dbg(1, vpfe,
			 "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
			 x);
		return -EFAULT;
	}

	if (!vpfe_ccdc_validate_param(ccdc, &raw_params)) {
		vpfe_ccdc_update_raw_params(ccdc, &raw_params);
		return 0;
	}

	return -EINVAL;
}

/*
 * vpfe_ccdc_config_ycbcr()
 * This function will configure CCDC for YCbCr video capture
 */
static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
{
	struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
	struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
	u32 syn_mode;

	vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");

	/*
	 * first restore the CCDC registers to default values
	 * This is important since we assume default values to be set in
	 * a lot of registers that we didn't touch
	 */
	vpfe_ccdc_restore_defaults(ccdc);

	/*
	 * configure pixel format, frame format, configure video frame
	 * format, enable output to SDRAM, enable internal timing generator
	 * and 8bit pack mode
	 */
	syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) <<
		    VPFE_SYN_MODE_INPMOD_SHIFT) |
		    ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) <<
		    VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE |
		    VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE);

	/* setup BT.656 sync mode */
	if (params->bt656_enable) {
		vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF);

		/*
		 * configure the FID, VD, HD pin polarity,
		 * fld,hd pol positive, vd negative, 8-bit data
		 */
		syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE;
		if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
			syn_mode |= VPFE_SYN_MODE_10BITS;
		else
			syn_mode |= VPFE_SYN_MODE_8BITS;
	} else {
		/* y/c external sync mode */
		syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) <<
			     VPFE_FID_POL_SHIFT) |
			     ((params->hd_pol & VPFE_HD_POL_MASK) <<
			     VPFE_HD_POL_SHIFT) |
			     ((params->vd_pol & VPFE_VD_POL_MASK) <<
			     VPFE_VD_POL_SHIFT));
	}

	vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);

	/* configure video window */
	vpfe_ccdc_setwin(ccdc, &params->win,
			 params->frm_fmt, params->bytesperpixel);

	/*
	 * configure the order of y cb cr in SDRAM, and disable latch
	 * internal register on vsync
	 */
	if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
		vpfe_reg_write(ccdc,
			       (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
			       VPFE_LATCH_ON_VSYNC_DISABLE |
			       VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG);
	else
		vpfe_reg_write(ccdc,
			       (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
			       VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);

	/*
	 * configure the horizontal line offset. This should be on a
	 * 32-byte boundary, so clear the 5 LSBs.
	 */
	vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);

	/* configure the memory line offset */
	if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
		/* two fields are interleaved in memory */
		vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED,
			       VPFE_SDOFST);
}

static void
vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc,
			     struct vpfe_ccdc_black_clamp *bclamp)
{
	u32 val;

	if (!bclamp->enable) {
		/* configure DCSub */
		val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK;
		vpfe_reg_write(ccdc, val, VPFE_DCSUB);
		vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP);
		return;
	}

	/*
	 * Configure gain, start pixel, number of lines to be averaged,
	 * number of pixels/line to be averaged, and enable black clamping.
	 */
	val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) |
	       ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) <<
	       VPFE_BLK_ST_PXL_SHIFT) |
	       ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) <<
	       VPFE_BLK_SAMPLE_LINE_SHIFT) |
	       ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) <<
	       VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE);
	vpfe_reg_write(ccdc, val, VPFE_CLAMP);
	/* If black clamping is enabled then make DCSub 0 */
	vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB);
}

static void
vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc,
				struct vpfe_ccdc_black_compensation *bcomp)
{
	u32 val;

	val = ((bcomp->b & VPFE_BLK_COMP_MASK) |
	      ((bcomp->gb & VPFE_BLK_COMP_MASK) <<
	       VPFE_BLK_COMP_GB_COMP_SHIFT) |
	      ((bcomp->gr & VPFE_BLK_COMP_MASK) <<
	       VPFE_BLK_COMP_GR_COMP_SHIFT) |
	      ((bcomp->r & VPFE_BLK_COMP_MASK) <<
	       VPFE_BLK_COMP_R_COMP_SHIFT));
	vpfe_reg_write(ccdc, val, VPFE_BLKCMP);
}

/*
 * vpfe_ccdc_config_raw()
 * This function will configure CCDC for Raw capture mode
 */
static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
{
	struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
	struct vpfe_ccdc_config_params_raw *config_params =
				&ccdc->ccdc_cfg.bayer.config_params;
	struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer;
	unsigned int syn_mode;
	unsigned int val;

	vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");

	/* Reset CCDC */
	vpfe_ccdc_restore_defaults(ccdc);

	/* Disable latching function registers on VSYNC */
	vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);

	/*
	 * Configure the vertical sync polarity (SYN_MODE.VDPOL), horizontal
	 * sync polarity (SYN_MODE.HDPOL), field id polarity (SYN_MODE.FLDPOL),
	 * frame format (progressive or interlaced), data size
	 * (SYN_MODE.DATSIZ) and pixel format (input mode), enable output to
	 * SDRAM and enable the internal timing generator.
	 */
	syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) |
		   ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) |
		   ((params->fid_pol & VPFE_FID_POL_MASK) <<
		   VPFE_FID_POL_SHIFT) | ((params->frm_fmt &
		   VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) |
		   ((config_params->data_sz & VPFE_DATA_SZ_MASK) <<
		   VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt &
		   VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) |
		   VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE);

	/* Enable and configure aLaw register if needed */
	if (config_params->alaw.enable) {
		val = ((config_params->alaw.gamma_wd &
		      VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE);
		vpfe_reg_write(ccdc, val, VPFE_ALAW);
		vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val);
	}

	/* Configure video window */
	vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt,
			 params->bytesperpixel);

	/* Configure Black Clamp */
	vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp);

	/* Configure Black level compensation */
	vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp);

	/* If data size is 8 bit then pack the data */
	if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) ||
	    config_params->alaw.enable)
		syn_mode |= VPFE_DATA_PACK_ENABLE;

	/*
	 * Configure Horizontal offset register. If pack 8 is enabled then
	 * 1 pixel will take 1 byte
	 */
	vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);

	vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n",
		 params->bytesperline, params->bytesperline);

	/* Set value for SDOFST */
	if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
		if (params->image_invert_enable) {
			/* For interlace inverse mode */
			vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT,
				       VPFE_SDOFST);
		} else {
			/* For interlace non inverse mode */
			vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT,
				       VPFE_SDOFST);
		}
	} else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
		vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT,
			       VPFE_SDOFST);
	}

	vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);

	vpfe_reg_dump(ccdc);
}

static inline int
vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc,
		      enum ccdc_buftype buf_type)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		ccdc->ccdc_cfg.bayer.buf_type = buf_type;
	else
		ccdc->ccdc_cfg.ycbcr.buf_type = buf_type;

	return 0;
}

static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		return ccdc->ccdc_cfg.bayer.buf_type;

	return ccdc->ccdc_cfg.ycbcr.buf_type;
}

static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
{
	struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);

	vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
		 ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));

	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
		ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
		/*
		 * Need to clear it in case it was left on
		 * after the last capture.
		 */
		ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0;

		switch (pixfmt) {
		case V4L2_PIX_FMT_SBGGR8:
			ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1;
			break;

		case V4L2_PIX_FMT_YUYV:
		case V4L2_PIX_FMT_UYVY:
		case V4L2_PIX_FMT_YUV420:
		case V4L2_PIX_FMT_NV12:
		case V4L2_PIX_FMT_RGB565X:
			break;

		case V4L2_PIX_FMT_SBGGR16:
		default:
			return -EINVAL;
		}
	} else {
		switch (pixfmt) {
		case V4L2_PIX_FMT_YUYV:
			ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
			break;

		case V4L2_PIX_FMT_UYVY:
			ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}

static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc)
{
	u32 pixfmt;

	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
		pixfmt = V4L2_PIX_FMT_YUYV;
	} else {
		if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
			pixfmt = V4L2_PIX_FMT_YUYV;
		else
			pixfmt = V4L2_PIX_FMT_UYVY;
	}

	return pixfmt;
}

static int
vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc,
			   struct v4l2_rect *win, unsigned int bpp)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
		ccdc->ccdc_cfg.bayer.win = *win;
		ccdc->ccdc_cfg.bayer.bytesperpixel = bpp;
		ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32);
	} else {
		ccdc->ccdc_cfg.ycbcr.win = *win;
		ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp;
		ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32);
	}

	return 0;
}

static inline void
vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc,
			   struct v4l2_rect *win)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		*win = ccdc->ccdc_cfg.bayer.win;
	else
		*win = ccdc->ccdc_cfg.ycbcr.win;
}

static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		return ccdc->ccdc_cfg.bayer.bytesperline;

	return ccdc->ccdc_cfg.ycbcr.bytesperline;
}

static inline int
vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc,
			   enum ccdc_frmfmt frm_fmt)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt;
	else
		ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt;

	return 0;
}

static inline enum ccdc_frmfmt
vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		return ccdc->ccdc_cfg.bayer.frm_fmt;

	return ccdc->ccdc_cfg.ycbcr.frm_fmt;
}

static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc)
{
	return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1;
}

static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr)
{
	vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR);
}

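/*
 * Record the interface type reported by the attached subdevice and cache its
 * VD/HD sync polarities; for raw bayer also derive the CCDC data size from
 * the bus width.
 */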
static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc,
				      struct vpfe_hw_if_param *params)
{
	struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);

	ccdc->ccdc_cfg.if_type = params->if_type;

	switch (params->if_type) {
	case VPFE_BT656:
	case VPFE_YCBCR_SYNC_16:
	case VPFE_YCBCR_SYNC_8:
	case VPFE_BT656_10BIT:
		ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol;
		ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol;
		break;

	case VPFE_RAW_BAYER:
		ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol;
		ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol;

		if (params->bus_width == 10)
			ccdc->ccdc_cfg.bayer.config_params.data_sz =
				VPFE_CCDC_DATA_10BITS;
		else
			ccdc->ccdc_cfg.bayer.config_params.data_sz =
				VPFE_CCDC_DATA_8BITS;

		vpfe_dbg(1, vpfe, "params.bus_width: %d\n",
			 params->bus_width);
		vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n",
			 ccdc->ccdc_cfg.bayer.config_params.data_sz);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

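/*
 * Acknowledge the requested VDINT interrupt (or all of them for any other
 * value) in the IRQ status register and signal end of interrupt.
 */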
static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint)
{
	unsigned int vpfe_int_status;

	vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);

	switch (vdint) {
	/* VD0 interrupt */
	case VPFE_VDINT0:
		vpfe_int_status &= ~VPFE_VDINT0;
		vpfe_int_status |= VPFE_VDINT0;
		break;

	/* VD1 interrupt */
	case VPFE_VDINT1:
		vpfe_int_status &= ~VPFE_VDINT1;
		vpfe_int_status |= VPFE_VDINT1;
		break;

	/* VD2 interrupt */
	case VPFE_VDINT2:
		vpfe_int_status &= ~VPFE_VDINT2;
		vpfe_int_status |= VPFE_VDINT2;
		break;

	/* Clear all interrupts */
	default:
		vpfe_int_status &= ~(VPFE_VDINT0 |
				     VPFE_VDINT1 |
				     VPFE_VDINT2);
		vpfe_int_status |= (VPFE_VDINT0 |
				    VPFE_VDINT1 |
				    VPFE_VDINT2);
		break;
	}
	/* Clear specific VDINT from the status register */
	vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS);

	vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);

	/* Acknowledge that we are done with all interrupts */
	vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI);
}

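/*
 * Load default configuration values for both the YCbCr (BT.656, 720x576,
 * interlaced) and raw bayer (progressive, 800x600, 8-bit) paths.
 */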
static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc)
{
	ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER;

	ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
	ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED;
	ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
	ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
	ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
	ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
	ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED;

	ccdc->ccdc_cfg.ycbcr.win.left = 0;
	ccdc->ccdc_cfg.ycbcr.win.top = 0;
	ccdc->ccdc_cfg.ycbcr.win.width = 720;
	ccdc->ccdc_cfg.ycbcr.win.height = 576;
	ccdc->ccdc_cfg.ycbcr.bt656_enable = 1;

	ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
	ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
	ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
	ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
	ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;

	ccdc->ccdc_cfg.bayer.win.left = 0;
	ccdc->ccdc_cfg.bayer.win.top = 0;
	ccdc->ccdc_cfg.bayer.win.width = 800;
	ccdc->ccdc_cfg.bayer.win.height = 600;
	ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS;
	ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd =
				VPFE_CCDC_GAMMA_BITS_09_0;
}

/*
 * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
 */
static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
				      struct v4l2_format *f)
{
	struct v4l2_rect image_win;
	enum ccdc_buftype buf_type;
	enum ccdc_frmfmt frm_fmt;

	memset(f, 0, sizeof(*f));
	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
	f->fmt.pix.width = image_win.width;
	f->fmt.pix.height = image_win.height;
	f->fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
	f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
				f->fmt.pix.height;
	buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc);
	f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc);
	frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc);

	if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
		f->fmt.pix.field = V4L2_FIELD_NONE;
	} else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
		if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
			f->fmt.pix.field = V4L2_FIELD_INTERLACED;
		} else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) {
			f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
		} else {
			vpfe_err(vpfe, "Invalid buf_type\n");
			return -EINVAL;
		}
	} else {
		vpfe_err(vpfe, "Invalid frm_fmt\n");
		return -EINVAL;
	}

	return 0;
}

static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
{
	enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
	int ret = 0;

	vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");

	vpfe_dbg(1, vpfe, "pixelformat: %s\n",
		 print_fourcc(vpfe->fmt.fmt.pix.pixelformat));

	if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc,
				       vpfe->fmt.fmt.pix.pixelformat) < 0) {
		vpfe_err(vpfe, "couldn't set pix format in ccdc\n");
		return -EINVAL;
	}

	/* configure the image window */
	vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);

	switch (vpfe->fmt.fmt.pix.field) {
	case V4L2_FIELD_INTERLACED:
		/* do nothing, since it is default */
		ret = vpfe_ccdc_set_buftype(
				&vpfe->ccdc,
				CCDC_BUFTYPE_FLD_INTERLEAVED);
		break;

	case V4L2_FIELD_NONE:
		frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
		/* buffer type only applicable for interlaced scan */
		break;

	case V4L2_FIELD_SEQ_TB:
		ret = vpfe_ccdc_set_buftype(
				&vpfe->ccdc,
				CCDC_BUFTYPE_FLD_SEPARATED);
		break;

	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt);
}

/*
 * vpfe_config_image_format()
 * For a given standard, this function sets up the default pix format and
 * crop values in the vpfe device and CCDC. It starts with default values
 * from the standard table, then overrides them with the format reported by
 * the sub device's get_fmt, if supported. Crop values are set to match the
 * scan resolution starting at 0,0. Finally it calls
 * vpfe_config_ccdc_image_format() to program the values into the CCDC.
 */
static int vpfe_config_image_format(struct vpfe_device *vpfe,
				    v4l2_std_id std_id)
{
	struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
		if (vpfe_standards[i].std_id & std_id) {
			vpfe->std_info.active_pixels =
					vpfe_standards[i].width;
			vpfe->std_info.active_lines =
					vpfe_standards[i].height;
			vpfe->std_info.frame_format =
					vpfe_standards[i].frame_format;
			vpfe->std_index = i;

			break;
		}
	}

	if (i == ARRAY_SIZE(vpfe_standards)) {
		vpfe_err(vpfe, "standard not supported\n");
		return -EINVAL;
	}

	vpfe->crop.top = vpfe->crop.left = 0;
	vpfe->crop.width = vpfe->std_info.active_pixels;
	vpfe->crop.height = vpfe->std_info.active_lines;

	pix->width = vpfe->crop.width;
	pix->height = vpfe->crop.height;
	pix->pixelformat = V4L2_PIX_FMT_YUYV;

	/* first field and frame format based on standard frame format */
	if (vpfe->std_info.frame_format)
		pix->field = V4L2_FIELD_INTERLACED;
	else
		pix->field = V4L2_FIELD_NONE;

	ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
	if (ret)
		return ret;

	/* Update the crop window based on found values */
	vpfe->crop.width = pix->width;
	vpfe->crop.height = pix->height;

	return vpfe_config_ccdc_image_format(vpfe);
}

static int vpfe_initialize_device(struct vpfe_device *vpfe)
{
	struct vpfe_subdev_info *sdinfo;
	int ret;

	sdinfo = &vpfe->cfg->sub_devs[0];
	sdinfo->sd = vpfe->sd[0];
	vpfe->current_input = 0;
	vpfe->std_index = 0;

	/* Configure the default format information */
	ret = vpfe_config_image_format(vpfe,
				       vpfe_standards[vpfe->std_index].std_id);
	if (ret)
		return ret;

	pm_runtime_get_sync(vpfe->pdev);

	vpfe_config_enable(&vpfe->ccdc, 1);

	vpfe_ccdc_restore_defaults(&vpfe->ccdc);

	/* Clear all VPFE interrupts */
	vpfe_clear_intr(&vpfe->ccdc, -1);

	return ret;
}

/*
 * vpfe_release : This function is based on the vb2_fop_release
 * helper function.
 * It has been augmented to handle module power management,
 * by disabling/enabling h/w module fcntl clock when necessary.
 */
static int vpfe_release(struct file *file)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	bool fh_singular;
	int ret;

	mutex_lock(&vpfe->lock);

	/* Save the singular status before we call the clean-up helper */
	fh_singular = v4l2_fh_is_singular_file(file);

	/* the release helper will cleanup any on-going streaming */
	ret = _vb2_fop_release(file, NULL);

	/*
	 * If this was the last open file handle,
	 * then de-initialize the hw module.
	 */
	if (fh_singular)
		vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);

	mutex_unlock(&vpfe->lock);

	return ret;
}

/*
 * vpfe_open : This function is based on the v4l2_fh_open helper function.
 * It has been augmented to handle module power management,
 * by disabling/enabling h/w module fcntl clock when necessary.
 */
static int vpfe_open(struct file *file)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	int ret;

	mutex_lock(&vpfe->lock);

	ret = v4l2_fh_open(file);
	if (ret) {
		vpfe_err(vpfe, "v4l2_fh_open failed\n");
		goto unlock;
	}

	if (!v4l2_fh_is_singular_file(file))
		goto unlock;

	if (vpfe_initialize_device(vpfe)) {
		v4l2_fh_release(file);
		ret = -ENODEV;
	}

unlock:
	mutex_unlock(&vpfe->lock);
	return ret;
}

/**
 * vpfe_schedule_next_buffer: set next buffer address for capture
 * @vpfe : ptr to vpfe device
 *
 * This function will get the next buffer from the dma queue and
 * set the buffer address in the vpfe register for capture.
 * The buffer is marked active.
 *
 * Assumes caller is holding vpfe->dma_queue_lock already
 */
static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
{
	vpfe->next_frm = list_entry(vpfe->dma_queue.next,
				    struct vpfe_cap_buffer, list);
	list_del(&vpfe->next_frm->list);

	vpfe_set_sdr_addr(&vpfe->ccdc,
	       vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
}

static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
{
	unsigned long addr;

	addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
					vpfe->field_off;

	vpfe_set_sdr_addr(&vpfe->ccdc, addr);
}

/*
 * vpfe_process_buffer_complete: process a completed buffer
 * @vpfe : ptr to vpfe device
 *
 * This function timestamps the buffer and marks it as DONE. It also
 * wakes up any process waiting on the QUEUE and sets the next buffer
 * as current.
 */
static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
{
	v4l2_get_timestamp(&vpfe->cur_frm->vb.timestamp);
	vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field;
	vpfe->cur_frm->vb.sequence = vpfe->sequence++;
	vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
	vpfe->cur_frm = vpfe->next_frm;
}

/*
 * vpfe_isr : ISR handler for vpfe capture (VINT0)
 * @irq: irq number
 * @dev_id: dev_id ptr
 *
 * It changes status of the captured buffer, takes next buffer from the queue
 * and sets its address in VPFE registers
 */
static irqreturn_t vpfe_isr(int irq, void *dev)
{
	struct vpfe_device *vpfe = (struct vpfe_device *)dev;
	enum v4l2_field field;
	int intr_status;
	int fid;

	intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);

	if (intr_status & VPFE_VDINT0) {
		field = vpfe->fmt.fmt.pix.field;

		if (field == V4L2_FIELD_NONE) {
			/* handle progressive frame capture */
			if (vpfe->cur_frm != vpfe->next_frm)
				vpfe_process_buffer_complete(vpfe);
			goto next_intr;
		}

		/*
		 * Interlaced or TB capture: check which field
		 * we are in from the hardware.
		 */
		fid = vpfe_ccdc_getfid(&vpfe->ccdc);

		/* switch the software maintained field id */
		vpfe->field ^= 1;
		if (fid == vpfe->field) {
			/* we are in sync here, continue */
			if (fid == 0) {
				/*
				 * One frame is just being captured. If the
				 * next frame is available, release the
				 * current frame and move on
				 */
				if (vpfe->cur_frm != vpfe->next_frm)
					vpfe_process_buffer_complete(vpfe);
				/*
				 * based on whether the two fields are stored
				 * interleaved or separately in memory,
				 * reconfigure the CCDC memory address
				 */
				if (field == V4L2_FIELD_SEQ_TB)
					vpfe_schedule_bottom_field(vpfe);

				goto next_intr;
			}
			/*
			 * If one field is just being captured, configure
			 * the next frame: get the next frame from the empty
			 * queue; if no frame is available, hold on to the
			 * current buffer.
			 */
			spin_lock(&vpfe->dma_queue_lock);
			if (!list_empty(&vpfe->dma_queue) &&
			    vpfe->cur_frm == vpfe->next_frm)
				vpfe_schedule_next_buffer(vpfe);
			spin_unlock(&vpfe->dma_queue_lock);
		} else if (fid == 0) {
			/*
			 * out of sync. Recover from any hardware out-of-sync.
			 * May lose one frame
			 */
			vpfe->field = fid;
		}
	}

next_intr:
	if (intr_status & VPFE_VDINT1) {
		spin_lock(&vpfe->dma_queue_lock);
		if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
		    !list_empty(&vpfe->dma_queue) &&
		    vpfe->cur_frm == vpfe->next_frm)
			vpfe_schedule_next_buffer(vpfe);
		spin_unlock(&vpfe->dma_queue_lock);
	}

	vpfe_clear_intr(&vpfe->ccdc, intr_status);

	return IRQ_HANDLED;
}

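/* Disable the VDINT0 (and, for progressive capture, VDINT1) frame interrupts. */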
static inline void vpfe_detach_irq(struct vpfe_device *vpfe)
{
	unsigned int intr = VPFE_VDINT0;
	enum ccdc_frmfmt frame_format;

	frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
	if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
		intr |= VPFE_VDINT1;

	vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR);
}

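/* Enable the VDINT0 (and, for progressive capture, VDINT1) frame interrupts. */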
static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
{
	unsigned int intr = VPFE_VDINT0;
	enum ccdc_frmfmt frame_format;

	frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
	if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
		intr |= VPFE_VDINT1;

	vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET);
}

static int vpfe_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	struct vpfe_device *vpfe = video_drvdata(file);

	vpfe_dbg(2, vpfe, "vpfe_querycap\n");

	strlcpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
	strlcpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "platform:%s", vpfe->v4l2_dev.name);
	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
			   V4L2_CAP_READWRITE;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

	return 0;
}

/* get the format set at output pad of the adjacent subdev */
static int __vpfe_get_format(struct vpfe_device *vpfe,
			     struct v4l2_format *format, unsigned int *bpp)
{
	struct v4l2_mbus_framefmt mbus_fmt;
	struct vpfe_subdev_info *sdinfo;
	struct v4l2_subdev_format fmt;
	int ret;

	sdinfo = vpfe->current_subdev;
	if (!sdinfo->sd)
		return -EINVAL;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt.pad = 0;

	ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
	if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
		return ret;

	if (!ret) {
		v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
		mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
	} else {
		ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
						 sdinfo->grp_id,
						 pad, get_fmt,
						 NULL, &fmt);
		if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
			return ret;
		v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
		mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
	}

	format->type = vpfe->fmt.type;

	vpfe_dbg(1, vpfe,
		 "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
		 __func__, format->fmt.pix.width, format->fmt.pix.height,
		 print_fourcc(format->fmt.pix.pixelformat),
		 format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);

	return 0;
}

/* set the format at output pad of the adjacent subdev */
static int __vpfe_set_format(struct vpfe_device *vpfe,
			     struct v4l2_format *format, unsigned int *bpp)
{
	struct vpfe_subdev_info *sdinfo;
	struct v4l2_subdev_format fmt;
	int ret;

	vpfe_dbg(2, vpfe, "__vpfe_set_format\n");

	sdinfo = vpfe->current_subdev;
	if (!sdinfo->sd)
		return -EINVAL;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt.pad = 0;

	pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);

	ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
	if (ret)
		return ret;

	v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
	mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);

	format->type = vpfe->fmt.type;

	vpfe_dbg(1, vpfe,
		 "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
		 __func__, format->fmt.pix.width, format->fmt.pix.height,
		 print_fourcc(format->fmt.pix.pixelformat),
		 format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);

	return 0;
}

static int vpfe_g_fmt(struct file *file, void *priv,
		      struct v4l2_format *fmt)
{
	struct vpfe_device *vpfe = video_drvdata(file);

	vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");

	*fmt = vpfe->fmt;

	return 0;
}

  1279. static int vpfe_enum_fmt(struct file *file, void *priv,
  1280. struct v4l2_fmtdesc *f)
  1281. {
  1282. struct vpfe_device *vpfe = video_drvdata(file);
  1283. struct vpfe_subdev_info *sdinfo;
  1284. struct vpfe_fmt *fmt = NULL;
  1285. unsigned int k;
  1286. vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
  1287. f->index);
  1288. sdinfo = vpfe->current_subdev;
  1289. if (!sdinfo->sd)
  1290. return -EINVAL;
1291. if (f->index >= ARRAY_SIZE(formats))
  1292. return -EINVAL;
  1293. for (k = 0; k < ARRAY_SIZE(formats); k++) {
  1294. if (formats[k].index == f->index) {
  1295. fmt = &formats[k];
  1296. break;
  1297. }
  1298. }
  1299. if (!fmt)
  1300. return -EINVAL;
1301. strlcpy(f->description, fmt->name, sizeof(f->description));
  1302. f->pixelformat = fmt->fourcc;
  1303. f->type = vpfe->fmt.type;
  1304. vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s [%s]\n",
  1305. f->index, fmt->code, print_fourcc(fmt->fourcc), fmt->name);
  1306. return 0;
  1307. }
  1308. static int vpfe_try_fmt(struct file *file, void *priv,
  1309. struct v4l2_format *fmt)
  1310. {
  1311. struct vpfe_device *vpfe = video_drvdata(file);
  1312. unsigned int bpp;
  1313. vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");
  1314. return __vpfe_get_format(vpfe, fmt, &bpp);
  1315. }
  1316. static int vpfe_s_fmt(struct file *file, void *priv,
  1317. struct v4l2_format *fmt)
  1318. {
  1319. struct vpfe_device *vpfe = video_drvdata(file);
  1320. struct v4l2_format format;
  1321. unsigned int bpp;
  1322. int ret;
  1323. vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");
  1324. /* If streaming is started, return error */
  1325. if (vb2_is_busy(&vpfe->buffer_queue)) {
  1326. vpfe_err(vpfe, "%s device busy\n", __func__);
  1327. return -EBUSY;
  1328. }
  1329. ret = __vpfe_get_format(vpfe, &format, &bpp);
  1330. if (ret)
  1331. return ret;
  1332. if (!cmp_v4l2_format(fmt, &format)) {
  1333. /* Sensor format is different from the requested format
  1334. * so we need to change it
  1335. */
  1336. ret = __vpfe_set_format(vpfe, fmt, &bpp);
  1337. if (ret)
  1338. return ret;
  1339. } else /* Just make sure all of the fields are consistent */
  1340. *fmt = format;
  1341. /* First detach any IRQ if currently attached */
  1342. vpfe_detach_irq(vpfe);
  1343. vpfe->fmt = *fmt;
  1344. vpfe->bpp = bpp;
  1345. /* Update the crop window based on found values */
  1346. vpfe->crop.width = fmt->fmt.pix.width;
  1347. vpfe->crop.height = fmt->fmt.pix.height;
  1348. /* set image capture parameters in the ccdc */
  1349. return vpfe_config_ccdc_image_format(vpfe);
  1350. }
  1351. static int vpfe_enum_size(struct file *file, void *priv,
  1352. struct v4l2_frmsizeenum *fsize)
  1353. {
  1354. struct vpfe_device *vpfe = video_drvdata(file);
  1355. struct v4l2_subdev_frame_size_enum fse;
  1356. struct vpfe_subdev_info *sdinfo;
  1357. struct v4l2_mbus_framefmt mbus;
  1358. struct v4l2_pix_format pix;
  1359. struct vpfe_fmt *fmt;
  1360. int ret;
  1361. vpfe_dbg(2, vpfe, "vpfe_enum_size\n");
  1362. /* check for valid format */
  1363. fmt = find_format_by_pix(fsize->pixel_format);
  1364. if (!fmt) {
  1365. vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
  1366. fsize->pixel_format);
  1367. return -EINVAL;
  1368. }
  1369. memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
  1370. sdinfo = vpfe->current_subdev;
  1371. if (!sdinfo->sd)
  1372. return -EINVAL;
  1373. memset(&pix, 0x0, sizeof(pix));
  1374. /* Construct pix from parameter and use default for the rest */
  1375. pix.pixelformat = fsize->pixel_format;
  1376. pix.width = 640;
  1377. pix.height = 480;
  1378. pix.colorspace = V4L2_COLORSPACE_SRGB;
  1379. pix.field = V4L2_FIELD_NONE;
  1380. pix_to_mbus(vpfe, &pix, &mbus);
  1381. memset(&fse, 0x0, sizeof(fse));
  1382. fse.index = fsize->index;
  1383. fse.pad = 0;
  1384. fse.code = mbus.code;
  1385. fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  1386. ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
  1387. if (ret)
  1388. return -EINVAL;
  1389. vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
  1390. fse.index, fse.code, fse.min_width, fse.max_width,
  1391. fse.min_height, fse.max_height);
  1392. fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
  1393. fsize->discrete.width = fse.max_width;
  1394. fsize->discrete.height = fse.max_height;
  1395. vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
  1396. fsize->index, print_fourcc(fsize->pixel_format),
  1397. fsize->discrete.width, fsize->discrete.height);
  1398. return 0;
  1399. }
  1400. /*
  1401. * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
  1402. * given app input index
  1403. */
  1404. static int
  1405. vpfe_get_subdev_input_index(struct vpfe_device *vpfe,
  1406. int *subdev_index,
  1407. int *subdev_input_index,
  1408. int app_input_index)
  1409. {
  1410. int i, j = 0;
  1411. for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
  1412. if (app_input_index < (j + 1)) {
  1413. *subdev_index = i;
  1414. *subdev_input_index = app_input_index - j;
  1415. return 0;
  1416. }
  1417. j++;
  1418. }
  1419. return -EINVAL;
  1420. }
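/*
 * Example of the mapping used by vpfe_get_subdev_input_index() above and
 * vpfe_get_app_input_index() below: with two connected sub-devices, each
 * exposing a single "Camera" input (as set up in vpfe_get_pdata()), app
 * input 0 maps to sub-device 0 / input 0 and app input 1 maps to
 * sub-device 1 / input 0.
 */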
1421. /*
1422. * vpfe_get_app_input_index - Get the app input index for the current subdev
1423. * input. The driver stores the input index of the current sub-device and
1424. * translates it when the application requests the current input.
1425. */
  1426. static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
  1427. int *app_input_index)
  1428. {
  1429. struct vpfe_config *cfg = vpfe->cfg;
  1430. struct vpfe_subdev_info *sdinfo;
  1431. struct i2c_client *client;
  1432. struct i2c_client *curr_client;
  1433. int i, j = 0;
  1434. curr_client = v4l2_get_subdevdata(vpfe->current_subdev->sd);
  1435. for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
  1436. sdinfo = &cfg->sub_devs[i];
  1437. client = v4l2_get_subdevdata(sdinfo->sd);
  1438. if (client->addr == curr_client->addr &&
  1439. client->adapter->nr == curr_client->adapter->nr) {
  1440. if (vpfe->current_input >= 1)
  1441. return -1;
  1442. *app_input_index = j + vpfe->current_input;
  1443. return 0;
  1444. }
  1445. j++;
  1446. }
  1447. return -EINVAL;
  1448. }
  1449. static int vpfe_enum_input(struct file *file, void *priv,
  1450. struct v4l2_input *inp)
  1451. {
  1452. struct vpfe_device *vpfe = video_drvdata(file);
  1453. struct vpfe_subdev_info *sdinfo;
  1454. int subdev, index;
  1455. vpfe_dbg(2, vpfe, "vpfe_enum_input\n");
  1456. if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
  1457. inp->index) < 0) {
  1458. vpfe_dbg(1, vpfe,
  1459. "input information not found for the subdev\n");
  1460. return -EINVAL;
  1461. }
  1462. sdinfo = &vpfe->cfg->sub_devs[subdev];
  1463. *inp = sdinfo->inputs[index];
  1464. return 0;
  1465. }
  1466. static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
  1467. {
  1468. struct vpfe_device *vpfe = video_drvdata(file);
  1469. vpfe_dbg(2, vpfe, "vpfe_g_input\n");
  1470. return vpfe_get_app_input_index(vpfe, index);
  1471. }
1472. /* Assumes caller is holding vpfe->lock */
  1473. static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
  1474. {
  1475. int subdev_index = 0, inp_index = 0;
  1476. struct vpfe_subdev_info *sdinfo;
  1477. struct vpfe_route *route;
  1478. u32 input, output;
  1479. int ret;
  1480. vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);
  1481. /* If streaming is started, return error */
  1482. if (vb2_is_busy(&vpfe->buffer_queue)) {
  1483. vpfe_err(vpfe, "%s device busy\n", __func__);
  1484. return -EBUSY;
  1485. }
  1486. ret = vpfe_get_subdev_input_index(vpfe,
  1487. &subdev_index,
  1488. &inp_index,
  1489. index);
  1490. if (ret < 0) {
  1491. vpfe_err(vpfe, "invalid input index: %d\n", index);
  1492. goto get_out;
  1493. }
  1494. sdinfo = &vpfe->cfg->sub_devs[subdev_index];
  1495. sdinfo->sd = vpfe->sd[subdev_index];
1496. route = sdinfo->routes ? &sdinfo->routes[inp_index] : NULL;
1497. if (route && sdinfo->can_route) {
  1498. input = route->input;
  1499. output = route->output;
  1500. if (sdinfo->sd) {
  1501. ret = v4l2_subdev_call(sdinfo->sd, video,
  1502. s_routing, input, output, 0);
  1503. if (ret) {
  1504. vpfe_err(vpfe, "s_routing failed\n");
  1505. ret = -EINVAL;
  1506. goto get_out;
  1507. }
  1508. }
  1509. }
  1510. vpfe->current_subdev = sdinfo;
  1511. if (sdinfo->sd)
  1512. vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler;
  1513. vpfe->current_input = index;
  1514. vpfe->std_index = 0;
  1515. /* set the bus/interface parameter for the sub device in ccdc */
  1516. ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param);
  1517. if (ret)
  1518. return ret;
  1519. /* set the default image parameters in the device */
  1520. return vpfe_config_image_format(vpfe,
  1521. vpfe_standards[vpfe->std_index].std_id);
  1522. get_out:
  1523. return ret;
  1524. }
  1525. static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
  1526. {
  1527. struct vpfe_device *vpfe = video_drvdata(file);
  1528. vpfe_dbg(2, vpfe,
  1529. "vpfe_s_input: index: %d\n", index);
  1530. return vpfe_set_input(vpfe, index);
  1531. }
  1532. static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
  1533. {
  1534. struct vpfe_device *vpfe = video_drvdata(file);
  1535. struct vpfe_subdev_info *sdinfo;
  1536. vpfe_dbg(2, vpfe, "vpfe_querystd\n");
  1537. sdinfo = vpfe->current_subdev;
  1538. if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
  1539. return -ENODATA;
  1540. /* Call querystd function of decoder device */
  1541. return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
  1542. video, querystd, std_id);
  1543. }
  1544. static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
  1545. {
  1546. struct vpfe_device *vpfe = video_drvdata(file);
  1547. struct vpfe_subdev_info *sdinfo;
  1548. int ret;
  1549. vpfe_dbg(2, vpfe, "vpfe_s_std\n");
  1550. sdinfo = vpfe->current_subdev;
  1551. if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
  1552. return -ENODATA;
  1553. /* If streaming is started, return error */
  1554. if (vb2_is_busy(&vpfe->buffer_queue)) {
  1555. vpfe_err(vpfe, "%s device busy\n", __func__);
  1556. ret = -EBUSY;
  1557. return ret;
  1558. }
  1559. ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
  1560. video, s_std, std_id);
  1561. if (ret < 0) {
  1562. vpfe_err(vpfe, "Failed to set standard\n");
  1563. return ret;
  1564. }
  1565. ret = vpfe_config_image_format(vpfe, std_id);
  1566. return ret;
  1567. }
  1568. static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
  1569. {
  1570. struct vpfe_device *vpfe = video_drvdata(file);
  1571. struct vpfe_subdev_info *sdinfo;
  1572. vpfe_dbg(2, vpfe, "vpfe_g_std\n");
  1573. sdinfo = vpfe->current_subdev;
1574. if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
  1575. return -ENODATA;
  1576. *std_id = vpfe_standards[vpfe->std_index].std_id;
  1577. return 0;
  1578. }
1579. /*
1580. * vpfe_calculate_offsets : This function calculates the offset of the
1581. * bottom field from the top field within a capture buffer
1582. */
  1583. static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
  1584. {
  1585. struct v4l2_rect image_win;
  1586. vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");
  1587. vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
  1588. vpfe->field_off = image_win.height * image_win.width;
  1589. }
1590. /*
1591. * vpfe_queue_setup - Callback function for buffer setup.
1592. * @vq: vb2_queue ptr
1593. * @parg: pointer to the requested v4l2 format, or NULL
1594. * @nbuffers: ptr to number of buffers requested by application
1595. * @nplanes: contains number of distinct video planes needed to hold a frame
1596. * @sizes[]: contains the size (in bytes) of each plane.
1597. * @alloc_ctxs: ptr to allocation contexts
1598. *
1599. * This callback function is called when VIDIOC_REQBUFS or
1600. * VIDIOC_CREATE_BUFS is issued, to adjust the buffer count and buffer size
1601. */
  1602. static int vpfe_queue_setup(struct vb2_queue *vq,
  1603. const void *parg,
  1604. unsigned int *nbuffers, unsigned int *nplanes,
  1605. unsigned int sizes[], void *alloc_ctxs[])
  1606. {
  1607. const struct v4l2_format *fmt = parg;
  1608. struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
  1609. if (fmt && fmt->fmt.pix.sizeimage < vpfe->fmt.fmt.pix.sizeimage)
  1610. return -EINVAL;
  1611. if (vq->num_buffers + *nbuffers < 3)
  1612. *nbuffers = 3 - vq->num_buffers;
  1613. *nplanes = 1;
  1614. sizes[0] = fmt ? fmt->fmt.pix.sizeimage : vpfe->fmt.fmt.pix.sizeimage;
  1615. alloc_ctxs[0] = vpfe->alloc_ctx;
  1616. vpfe_dbg(1, vpfe,
  1617. "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]);
  1618. /* Calculate field offset */
  1619. vpfe_calculate_offsets(vpfe);
  1620. return 0;
  1621. }
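/*
 * Example: on an empty queue, a VIDIOC_REQBUFS request for a single buffer
 * is raised to three by the check in vpfe_queue_setup() above, so at least
 * three capture buffers are always allocated.
 */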
1622. /*
1623. * vpfe_buffer_prepare : callback function for buffer prepare
1624. * @vb: ptr to vb2_buffer
1625. *
1626. * This is the callback function for buffer prepare when vb2_qbuf()
1627. * function is called. It sets the plane payload to the configured image
1628. * size, checks that it fits in the allocated plane and sets the buffer field
1629. */
  1630. static int vpfe_buffer_prepare(struct vb2_buffer *vb)
  1631. {
  1632. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  1633. struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
  1634. vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);
  1635. if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
  1636. return -EINVAL;
  1637. vbuf->field = vpfe->fmt.fmt.pix.field;
  1638. return 0;
  1639. }
  1640. /*
  1641. * vpfe_buffer_queue : Callback function to add buffer to DMA queue
  1642. * @vb: ptr to vb2_buffer
  1643. */
  1644. static void vpfe_buffer_queue(struct vb2_buffer *vb)
  1645. {
  1646. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  1647. struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
  1648. struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf);
  1649. unsigned long flags = 0;
  1650. /* add the buffer to the DMA queue */
  1651. spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
  1652. list_add_tail(&buf->list, &vpfe->dma_queue);
  1653. spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
  1654. }
1655. /*
1656. * vpfe_start_streaming : Starts the DMA engine for streaming
1657. * @vq: ptr to vb2_queue
1658. * @count: number of buffers already queued
1659. */
  1660. static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
  1661. {
  1662. struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
  1663. struct vpfe_cap_buffer *buf, *tmp;
  1664. struct vpfe_subdev_info *sdinfo;
  1665. unsigned long flags;
  1666. unsigned long addr;
  1667. int ret;
  1668. spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
  1669. vpfe->field = 0;
  1670. vpfe->sequence = 0;
  1671. sdinfo = vpfe->current_subdev;
  1672. vpfe_attach_irq(vpfe);
  1673. if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
  1674. vpfe_ccdc_config_raw(&vpfe->ccdc);
  1675. else
  1676. vpfe_ccdc_config_ycbcr(&vpfe->ccdc);
  1677. /* Get the next frame from the buffer queue */
  1678. vpfe->next_frm = list_entry(vpfe->dma_queue.next,
  1679. struct vpfe_cap_buffer, list);
  1680. vpfe->cur_frm = vpfe->next_frm;
  1681. /* Remove buffer from the buffer queue */
  1682. list_del(&vpfe->cur_frm->list);
  1683. spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
  1684. addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0);
  1685. vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));
  1686. vpfe_pcr_enable(&vpfe->ccdc, 1);
  1687. ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1);
  1688. if (ret < 0) {
1689. vpfe_err(vpfe, "Failed to start streaming on the sub-device\n");
  1690. goto err;
  1691. }
  1692. return 0;
  1693. err:
  1694. list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
  1695. list_del(&buf->list);
  1696. vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
  1697. }
  1698. return ret;
  1699. }
1700. /*
1701. * vpfe_stop_streaming : Stop the DMA engine
1702. * @vq: ptr to vb2_queue
1703. *
1704. * This callback stops the DMA engine and releases any buffers
1705. * remaining in the DMA queue.
1706. */
  1707. static void vpfe_stop_streaming(struct vb2_queue *vq)
  1708. {
  1709. struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
  1710. struct vpfe_subdev_info *sdinfo;
  1711. unsigned long flags;
  1712. int ret;
  1713. vpfe_pcr_enable(&vpfe->ccdc, 0);
  1714. vpfe_detach_irq(vpfe);
  1715. sdinfo = vpfe->current_subdev;
  1716. ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0);
  1717. if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
  1718. vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
  1719. /* release all active buffers */
  1720. spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
  1721. if (vpfe->cur_frm == vpfe->next_frm) {
  1722. vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
  1723. VB2_BUF_STATE_ERROR);
  1724. } else {
  1725. if (vpfe->cur_frm != NULL)
  1726. vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
  1727. VB2_BUF_STATE_ERROR);
  1728. if (vpfe->next_frm != NULL)
  1729. vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
  1730. VB2_BUF_STATE_ERROR);
  1731. }
  1732. while (!list_empty(&vpfe->dma_queue)) {
  1733. vpfe->next_frm = list_entry(vpfe->dma_queue.next,
  1734. struct vpfe_cap_buffer, list);
  1735. list_del(&vpfe->next_frm->list);
  1736. vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
  1737. VB2_BUF_STATE_ERROR);
  1738. }
  1739. spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
  1740. }
  1741. static int vpfe_cropcap(struct file *file, void *priv,
  1742. struct v4l2_cropcap *crop)
  1743. {
  1744. struct vpfe_device *vpfe = video_drvdata(file);
  1745. vpfe_dbg(2, vpfe, "vpfe_cropcap\n");
  1746. if (vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
  1747. return -EINVAL;
  1748. memset(crop, 0, sizeof(struct v4l2_cropcap));
  1749. crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  1750. crop->defrect.width = vpfe_standards[vpfe->std_index].width;
  1751. crop->bounds.width = crop->defrect.width;
  1752. crop->defrect.height = vpfe_standards[vpfe->std_index].height;
  1753. crop->bounds.height = crop->defrect.height;
  1754. crop->pixelaspect = vpfe_standards[vpfe->std_index].pixelaspect;
  1755. return 0;
  1756. }
  1757. static int
  1758. vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
  1759. {
  1760. struct vpfe_device *vpfe = video_drvdata(file);
  1761. switch (s->target) {
  1762. case V4L2_SEL_TGT_CROP_BOUNDS:
  1763. case V4L2_SEL_TGT_CROP_DEFAULT:
  1764. s->r.left = s->r.top = 0;
  1765. s->r.width = vpfe->crop.width;
  1766. s->r.height = vpfe->crop.height;
  1767. break;
  1768. case V4L2_SEL_TGT_CROP:
  1769. s->r = vpfe->crop;
  1770. break;
  1771. default:
  1772. return -EINVAL;
  1773. }
  1774. return 0;
  1775. }
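/* Return 1 if rectangle 'a' lies entirely inside rectangle 'b', else 0 */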
  1776. static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
  1777. {
  1778. if (a->left < b->left || a->top < b->top)
  1779. return 0;
  1780. if (a->left + a->width > b->left + b->width)
  1781. return 0;
  1782. if (a->top + a->height > b->top + b->height)
  1783. return 0;
  1784. return 1;
  1785. }
  1786. static int
  1787. vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
  1788. {
  1789. struct vpfe_device *vpfe = video_drvdata(file);
  1790. struct v4l2_rect cr = vpfe->crop;
  1791. struct v4l2_rect r = s->r;
  1792. /* If streaming is started, return error */
  1793. if (vb2_is_busy(&vpfe->buffer_queue)) {
  1794. vpfe_err(vpfe, "%s device busy\n", __func__);
  1795. return -EBUSY;
  1796. }
  1797. if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
  1798. s->target != V4L2_SEL_TGT_CROP)
  1799. return -EINVAL;
  1800. v4l_bound_align_image(&r.width, 0, cr.width, 0,
  1801. &r.height, 0, cr.height, 0, 0);
  1802. r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width);
  1803. r.top = clamp_t(unsigned int, r.top, 0, cr.height - r.height);
  1804. if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&r, &s->r))
  1805. return -ERANGE;
  1806. if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &r))
  1807. return -ERANGE;
  1808. s->r = vpfe->crop = r;
  1809. vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
  1810. vpfe->fmt.fmt.pix.width = r.width;
  1811. vpfe->fmt.fmt.pix.height = r.height;
  1812. vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
  1813. vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
  1814. vpfe->fmt.fmt.pix.height;
  1815. vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n",
  1816. r.left, r.top, r.width, r.height, cr.width, cr.height);
  1817. return 0;
  1818. }
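/*
 * vpfe_ioctl_default - handler for the driver-private VIDIOC_AM437X_CCDC_CFG
 * ioctl: the user-supplied CCDC parameters are passed to
 * vpfe_ccdc_set_params() and, on success, the resulting CCDC image format is
 * read back into vpfe->fmt.
 */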
  1819. static long vpfe_ioctl_default(struct file *file, void *priv,
  1820. bool valid_prio, unsigned int cmd, void *param)
  1821. {
  1822. struct vpfe_device *vpfe = video_drvdata(file);
  1823. int ret;
  1824. vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");
  1825. if (!valid_prio) {
1826. vpfe_err(vpfe, "%s: priority check failed\n", __func__);
  1827. return -EBUSY;
  1828. }
  1829. /* If streaming is started, return error */
  1830. if (vb2_is_busy(&vpfe->buffer_queue)) {
  1831. vpfe_err(vpfe, "%s device busy\n", __func__);
  1832. return -EBUSY;
  1833. }
  1834. switch (cmd) {
  1835. case VIDIOC_AM437X_CCDC_CFG:
  1836. ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param);
  1837. if (ret) {
  1838. vpfe_dbg(2, vpfe,
  1839. "Error setting parameters in CCDC\n");
  1840. return ret;
  1841. }
  1842. ret = vpfe_get_ccdc_image_format(vpfe,
  1843. &vpfe->fmt);
  1844. if (ret < 0) {
  1845. vpfe_dbg(2, vpfe,
  1846. "Invalid image format at CCDC\n");
  1847. return ret;
  1848. }
  1849. break;
  1850. default:
  1851. ret = -ENOTTY;
  1852. break;
  1853. }
  1854. return ret;
  1855. }
  1856. static const struct vb2_ops vpfe_video_qops = {
  1857. .wait_prepare = vb2_ops_wait_prepare,
  1858. .wait_finish = vb2_ops_wait_finish,
  1859. .queue_setup = vpfe_queue_setup,
  1860. .buf_prepare = vpfe_buffer_prepare,
  1861. .buf_queue = vpfe_buffer_queue,
  1862. .start_streaming = vpfe_start_streaming,
  1863. .stop_streaming = vpfe_stop_streaming,
  1864. };
  1865. /* vpfe capture driver file operations */
  1866. static const struct v4l2_file_operations vpfe_fops = {
  1867. .owner = THIS_MODULE,
  1868. .open = vpfe_open,
  1869. .release = vpfe_release,
  1870. .read = vb2_fop_read,
  1871. .poll = vb2_fop_poll,
  1872. .unlocked_ioctl = video_ioctl2,
  1873. .mmap = vb2_fop_mmap,
  1874. };
  1875. /* vpfe capture ioctl operations */
  1876. static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
  1877. .vidioc_querycap = vpfe_querycap,
  1878. .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
  1879. .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
  1880. .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
  1881. .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
  1882. .vidioc_enum_framesizes = vpfe_enum_size,
  1883. .vidioc_enum_input = vpfe_enum_input,
  1884. .vidioc_g_input = vpfe_g_input,
  1885. .vidioc_s_input = vpfe_s_input,
  1886. .vidioc_querystd = vpfe_querystd,
  1887. .vidioc_s_std = vpfe_s_std,
  1888. .vidioc_g_std = vpfe_g_std,
  1889. .vidioc_reqbufs = vb2_ioctl_reqbufs,
  1890. .vidioc_create_bufs = vb2_ioctl_create_bufs,
  1891. .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
  1892. .vidioc_querybuf = vb2_ioctl_querybuf,
  1893. .vidioc_qbuf = vb2_ioctl_qbuf,
  1894. .vidioc_dqbuf = vb2_ioctl_dqbuf,
  1895. .vidioc_expbuf = vb2_ioctl_expbuf,
  1896. .vidioc_streamon = vb2_ioctl_streamon,
  1897. .vidioc_streamoff = vb2_ioctl_streamoff,
  1898. .vidioc_log_status = v4l2_ctrl_log_status,
  1899. .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
  1900. .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
  1901. .vidioc_cropcap = vpfe_cropcap,
  1902. .vidioc_g_selection = vpfe_g_selection,
  1903. .vidioc_s_selection = vpfe_s_selection,
  1904. .vidioc_default = vpfe_ioctl_default,
  1905. };
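/*
 * Illustrative user-space capture sequence against the ioctls wired up in
 * vpfe_ioctl_ops above (a minimal sketch, not part of the driver): the
 * device node path, pixel format and buffer count below are assumptions for
 * the example only, and the caller needs <fcntl.h>, <sys/ioctl.h> and
 * <linux/videodev2.h>. VIDIOC_S_FMT lands in vpfe_s_fmt(); VIDIOC_REQBUFS
 * and VIDIOC_STREAMON are handled by the vb2 helpers.
 *
 *	int fd = open("/dev/video0", O_RDWR);
 *
 *	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
 *	fmt.fmt.pix.width = 640;
 *	fmt.fmt.pix.height = 480;
 *	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
 *	ioctl(fd, VIDIOC_S_FMT, &fmt);
 *
 *	struct v4l2_requestbuffers req = {
 *		.count = 3,
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *	ioctl(fd, VIDIOC_REQBUFS, &req);
 *
 *	(mmap() and VIDIOC_QBUF each buffer, then)
 *	int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	ioctl(fd, VIDIOC_STREAMON, &type);
 */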
  1906. static int
  1907. vpfe_async_bound(struct v4l2_async_notifier *notifier,
  1908. struct v4l2_subdev *subdev,
  1909. struct v4l2_async_subdev *asd)
  1910. {
  1911. struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
  1912. struct vpfe_device, v4l2_dev);
  1913. struct v4l2_subdev_mbus_code_enum mbus_code;
  1914. struct vpfe_subdev_info *sdinfo;
  1915. bool found = false;
  1916. int i, j;
  1917. vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
  1918. for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
1919. if (vpfe->cfg->asd[i]->match.of.node == asd->match.of.node) {
  1920. sdinfo = &vpfe->cfg->sub_devs[i];
  1921. vpfe->sd[i] = subdev;
  1922. vpfe->sd[i]->grp_id = sdinfo->grp_id;
  1923. found = true;
  1924. break;
  1925. }
  1926. }
  1927. if (!found) {
  1928. vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name);
  1929. return -EINVAL;
  1930. }
  1931. vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;
  1932. /* setup the supported formats & indexes */
  1933. for (j = 0, i = 0; ; ++j) {
  1934. struct vpfe_fmt *fmt;
  1935. int ret;
  1936. memset(&mbus_code, 0, sizeof(mbus_code));
  1937. mbus_code.index = j;
  1938. mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  1939. ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
  1940. NULL, &mbus_code);
  1941. if (ret)
  1942. break;
  1943. fmt = find_format_by_code(mbus_code.code);
  1944. if (!fmt)
  1945. continue;
  1946. fmt->supported = true;
  1947. fmt->index = i++;
  1948. }
  1949. return 0;
  1950. }
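/*
 * vpfe_probe_complete - finish initialization once all sub-devices have been
 * bound: select the first sub-device as the current input, set up the vb2
 * queue and register the video device.
 */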
  1951. static int vpfe_probe_complete(struct vpfe_device *vpfe)
  1952. {
  1953. struct video_device *vdev;
  1954. struct vb2_queue *q;
  1955. int err;
  1956. spin_lock_init(&vpfe->dma_queue_lock);
  1957. mutex_init(&vpfe->lock);
  1958. vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  1959. /* set first sub device as current one */
  1960. vpfe->current_subdev = &vpfe->cfg->sub_devs[0];
  1961. vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler;
  1962. err = vpfe_set_input(vpfe, 0);
  1963. if (err)
  1964. goto probe_out;
  1965. /* Initialize videobuf2 queue as per the buffer type */
  1966. vpfe->alloc_ctx = vb2_dma_contig_init_ctx(vpfe->pdev);
  1967. if (IS_ERR(vpfe->alloc_ctx)) {
  1968. vpfe_err(vpfe, "Failed to get the context\n");
  1969. err = PTR_ERR(vpfe->alloc_ctx);
  1970. goto probe_out;
  1971. }
  1972. q = &vpfe->buffer_queue;
  1973. q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  1974. q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
  1975. q->drv_priv = vpfe;
  1976. q->ops = &vpfe_video_qops;
  1977. q->mem_ops = &vb2_dma_contig_memops;
  1978. q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
  1979. q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
  1980. q->lock = &vpfe->lock;
  1981. q->min_buffers_needed = 1;
  1982. err = vb2_queue_init(q);
  1983. if (err) {
  1984. vpfe_err(vpfe, "vb2_queue_init() failed\n");
  1985. vb2_dma_contig_cleanup_ctx(vpfe->alloc_ctx);
  1986. goto probe_out;
  1987. }
  1988. INIT_LIST_HEAD(&vpfe->dma_queue);
  1989. vdev = &vpfe->video_dev;
  1990. strlcpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name));
  1991. vdev->release = video_device_release_empty;
  1992. vdev->fops = &vpfe_fops;
  1993. vdev->ioctl_ops = &vpfe_ioctl_ops;
  1994. vdev->v4l2_dev = &vpfe->v4l2_dev;
  1995. vdev->vfl_dir = VFL_DIR_RX;
  1996. vdev->queue = q;
  1997. vdev->lock = &vpfe->lock;
  1998. video_set_drvdata(vdev, vpfe);
  1999. err = video_register_device(&vpfe->video_dev, VFL_TYPE_GRABBER, -1);
  2000. if (err) {
  2001. vpfe_err(vpfe,
  2002. "Unable to register video device.\n");
  2003. goto probe_out;
  2004. }
  2005. return 0;
  2006. probe_out:
  2007. v4l2_device_unregister(&vpfe->v4l2_dev);
  2008. return err;
  2009. }
  2010. static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
  2011. {
  2012. struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
  2013. struct vpfe_device, v4l2_dev);
  2014. return vpfe_probe_complete(vpfe);
  2015. }
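/*
 * vpfe_get_pdata - build the platform configuration from the device tree:
 * walk the port endpoints, describe one "Camera" input per connected
 * sub-device and record its parallel bus parameters (interface type, bus
 * width, sync polarities) along with an async match entry.
 */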
  2016. static struct vpfe_config *
  2017. vpfe_get_pdata(struct platform_device *pdev)
  2018. {
  2019. struct device_node *endpoint = NULL;
  2020. struct v4l2_of_endpoint bus_cfg;
  2021. struct vpfe_subdev_info *sdinfo;
  2022. struct vpfe_config *pdata;
  2023. unsigned int flags;
  2024. unsigned int i;
  2025. int err;
  2026. dev_dbg(&pdev->dev, "vpfe_get_pdata\n");
  2027. if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
  2028. return pdev->dev.platform_data;
  2029. pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
  2030. if (!pdata)
  2031. return NULL;
  2032. for (i = 0; ; i++) {
  2033. struct device_node *rem;
  2034. endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
  2035. endpoint);
  2036. if (!endpoint)
  2037. break;
  2038. sdinfo = &pdata->sub_devs[i];
  2039. sdinfo->grp_id = 0;
  2040. /* we only support camera */
  2041. sdinfo->inputs[0].index = i;
  2042. strcpy(sdinfo->inputs[0].name, "Camera");
  2043. sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA;
  2044. sdinfo->inputs[0].std = V4L2_STD_ALL;
  2045. sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD;
  2046. sdinfo->can_route = 0;
  2047. sdinfo->routes = NULL;
  2048. of_property_read_u32(endpoint, "ti,am437x-vpfe-interface",
  2049. &sdinfo->vpfe_param.if_type);
  2050. if (sdinfo->vpfe_param.if_type < 0 ||
  2051. sdinfo->vpfe_param.if_type > 4) {
  2052. sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER;
  2053. }
  2054. err = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
  2055. if (err) {
  2056. dev_err(&pdev->dev, "Could not parse the endpoint\n");
  2057. goto done;
  2058. }
  2059. sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width;
  2060. if (sdinfo->vpfe_param.bus_width < 8 ||
  2061. sdinfo->vpfe_param.bus_width > 16) {
  2062. dev_err(&pdev->dev, "Invalid bus width.\n");
  2063. goto done;
  2064. }
  2065. flags = bus_cfg.bus.parallel.flags;
  2066. if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
  2067. sdinfo->vpfe_param.hdpol = 1;
  2068. if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
  2069. sdinfo->vpfe_param.vdpol = 1;
  2070. rem = of_graph_get_remote_port_parent(endpoint);
  2071. if (!rem) {
  2072. dev_err(&pdev->dev, "Remote device at %s not found\n",
  2073. endpoint->full_name);
  2074. goto done;
  2075. }
  2076. pdata->asd[i] = devm_kzalloc(&pdev->dev,
  2077. sizeof(struct v4l2_async_subdev),
  2078. GFP_KERNEL);
  2079. if (!pdata->asd[i]) {
  2080. of_node_put(rem);
  2081. pdata = NULL;
  2082. goto done;
  2083. }
  2084. pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_OF;
  2085. pdata->asd[i]->match.of.node = rem;
  2086. of_node_put(rem);
  2087. }
  2088. of_node_put(endpoint);
  2089. return pdata;
  2090. done:
  2091. of_node_put(endpoint);
  2092. return NULL;
  2093. }
2094. /*
2095. * vpfe_probe : This function creates device entries by registering
2096. * itself with the V4L2 framework and initializes the fields of each
2097. * device object
2098. */
  2099. static int vpfe_probe(struct platform_device *pdev)
  2100. {
  2101. struct vpfe_config *vpfe_cfg = vpfe_get_pdata(pdev);
  2102. struct vpfe_device *vpfe;
  2103. struct vpfe_ccdc *ccdc;
  2104. struct resource *res;
  2105. int ret;
  2106. if (!vpfe_cfg) {
  2107. dev_err(&pdev->dev, "No platform data\n");
  2108. return -EINVAL;
  2109. }
  2110. vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
  2111. if (!vpfe)
  2112. return -ENOMEM;
  2113. vpfe->pdev = &pdev->dev;
  2114. vpfe->cfg = vpfe_cfg;
  2115. ccdc = &vpfe->ccdc;
  2116. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2117. ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
  2118. if (IS_ERR(ccdc->ccdc_cfg.base_addr))
  2119. return PTR_ERR(ccdc->ccdc_cfg.base_addr);
  2120. ret = platform_get_irq(pdev, 0);
  2121. if (ret <= 0) {
  2122. dev_err(&pdev->dev, "No IRQ resource\n");
  2123. return -ENODEV;
  2124. }
  2125. vpfe->irq = ret;
  2126. ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
  2127. "vpfe_capture0", vpfe);
  2128. if (ret) {
  2129. dev_err(&pdev->dev, "Unable to request interrupt\n");
  2130. return -EINVAL;
  2131. }
  2132. ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
  2133. if (ret) {
  2134. vpfe_err(vpfe,
  2135. "Unable to register v4l2 device.\n");
  2136. return ret;
  2137. }
  2138. /* set the driver data in platform device */
  2139. platform_set_drvdata(pdev, vpfe);
  2140. /* Enabling module functional clock */
  2141. pm_runtime_enable(&pdev->dev);
  2142. /* for now just enable it here instead of waiting for the open */
  2143. pm_runtime_get_sync(&pdev->dev);
  2144. vpfe_ccdc_config_defaults(ccdc);
  2145. pm_runtime_put_sync(&pdev->dev);
  2146. vpfe->sd = devm_kzalloc(&pdev->dev, sizeof(struct v4l2_subdev *) *
  2147. ARRAY_SIZE(vpfe->cfg->asd), GFP_KERNEL);
  2148. if (!vpfe->sd) {
  2149. ret = -ENOMEM;
  2150. goto probe_out_v4l2_unregister;
  2151. }
  2152. vpfe->notifier.subdevs = vpfe->cfg->asd;
  2153. vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
  2154. vpfe->notifier.bound = vpfe_async_bound;
  2155. vpfe->notifier.complete = vpfe_async_complete;
  2156. ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
  2157. &vpfe->notifier);
  2158. if (ret) {
  2159. vpfe_err(vpfe, "Error registering async notifier\n");
  2160. ret = -EINVAL;
  2161. goto probe_out_v4l2_unregister;
  2162. }
  2163. return 0;
  2164. probe_out_v4l2_unregister:
  2165. v4l2_device_unregister(&vpfe->v4l2_dev);
  2166. return ret;
  2167. }
2168. /*
2169. * vpfe_remove : It unregisters the device from the V4L2 framework
2170. */
  2171. static int vpfe_remove(struct platform_device *pdev)
  2172. {
  2173. struct vpfe_device *vpfe = platform_get_drvdata(pdev);
  2174. vpfe_dbg(2, vpfe, "vpfe_remove\n");
  2175. pm_runtime_disable(&pdev->dev);
  2176. v4l2_async_notifier_unregister(&vpfe->notifier);
  2177. v4l2_device_unregister(&vpfe->v4l2_dev);
  2178. video_unregister_device(&vpfe->video_dev);
  2179. return 0;
  2180. }
  2181. #ifdef CONFIG_PM_SLEEP
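/*
 * vpfe_save_context - snapshot the CCDC registers so that vpfe_resume() can
 * restore them through vpfe_restore_context()
 */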
  2182. static void vpfe_save_context(struct vpfe_ccdc *ccdc)
  2183. {
  2184. ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR);
  2185. ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE);
  2186. ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST);
  2187. ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR);
  2188. ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP);
  2189. ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB);
  2190. ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN);
  2191. ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, VPFE_BLKCMP);
  2192. ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT);
  2193. ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW);
  2194. ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF);
  2195. ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG);
  2196. ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING);
  2197. ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc,
  2198. VPFE_HD_VD_WID);
  2199. ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc,
  2200. VPFE_PIX_LINES);
  2201. ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc,
  2202. VPFE_HORZ_INFO);
  2203. ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc,
  2204. VPFE_VERT_START);
  2205. ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc,
  2206. VPFE_VERT_LINES);
  2207. ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc,
  2208. VPFE_HSIZE_OFF);
  2209. }
  2210. static int vpfe_suspend(struct device *dev)
  2211. {
  2212. struct platform_device *pdev = to_platform_device(dev);
  2213. struct vpfe_device *vpfe = platform_get_drvdata(pdev);
  2214. struct vpfe_ccdc *ccdc = &vpfe->ccdc;
  2215. /* if streaming has not started we don't care */
  2216. if (!vb2_start_streaming_called(&vpfe->buffer_queue))
  2217. return 0;
  2218. pm_runtime_get_sync(dev);
  2219. vpfe_config_enable(ccdc, 1);
  2220. /* Save VPFE context */
  2221. vpfe_save_context(ccdc);
  2222. /* Disable CCDC */
  2223. vpfe_pcr_enable(ccdc, 0);
  2224. vpfe_config_enable(ccdc, 0);
  2225. /* Disable both master and slave clock */
  2226. pm_runtime_put_sync(dev);
  2227. /* Select sleep pin state */
  2228. pinctrl_pm_select_sleep_state(dev);
  2229. return 0;
  2230. }
  2231. static void vpfe_restore_context(struct vpfe_ccdc *ccdc)
  2232. {
  2233. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE);
  2234. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING);
  2235. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST);
  2236. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR);
  2237. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP);
  2238. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB);
  2239. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN);
  2240. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP);
  2241. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT);
  2242. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW);
  2243. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF);
  2244. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG);
  2245. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR);
  2246. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2],
  2247. VPFE_HD_VD_WID);
  2248. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2],
  2249. VPFE_PIX_LINES);
  2250. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2],
  2251. VPFE_HORZ_INFO);
  2252. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2],
  2253. VPFE_VERT_START);
  2254. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2],
  2255. VPFE_VERT_LINES);
  2256. vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2],
  2257. VPFE_HSIZE_OFF);
  2258. }
  2259. static int vpfe_resume(struct device *dev)
  2260. {
  2261. struct platform_device *pdev = to_platform_device(dev);
  2262. struct vpfe_device *vpfe = platform_get_drvdata(pdev);
  2263. struct vpfe_ccdc *ccdc = &vpfe->ccdc;
  2264. /* if streaming has not started we don't care */
  2265. if (!vb2_start_streaming_called(&vpfe->buffer_queue))
  2266. return 0;
  2267. /* Enable both master and slave clock */
  2268. pm_runtime_get_sync(dev);
  2269. vpfe_config_enable(ccdc, 1);
  2270. /* Restore VPFE context */
  2271. vpfe_restore_context(ccdc);
  2272. vpfe_config_enable(ccdc, 0);
  2273. pm_runtime_put_sync(dev);
  2274. /* Select default pin state */
  2275. pinctrl_pm_select_default_state(dev);
  2276. return 0;
  2277. }
  2278. #endif
  2279. static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume);
  2280. static const struct of_device_id vpfe_of_match[] = {
  2281. { .compatible = "ti,am437x-vpfe", },
  2282. { /* sentinel */ },
  2283. };
  2284. MODULE_DEVICE_TABLE(of, vpfe_of_match);
  2285. static struct platform_driver vpfe_driver = {
  2286. .probe = vpfe_probe,
  2287. .remove = vpfe_remove,
  2288. .driver = {
  2289. .name = VPFE_MODULE_NAME,
  2290. .pm = &vpfe_pm_ops,
  2291. .of_match_table = of_match_ptr(vpfe_of_match),
  2292. },
  2293. };
  2294. module_platform_driver(vpfe_driver);
  2295. MODULE_AUTHOR("Texas Instruments");
  2296. MODULE_DESCRIPTION("TI AM437x VPFE driver");
  2297. MODULE_LICENSE("GPL");
  2298. MODULE_VERSION(VPFE_VERSION);