c8sectpfe-core.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237
  1. /*
  2. * c8sectpfe-core.c - C8SECTPFE STi DVB driver
  3. *
  4. * Copyright (c) STMicroelectronics 2015
  5. *
  6. * Author:Peter Bennett <peter.bennett@st.com>
  7. * Peter Griffin <peter.griffin@linaro.org>
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License as
  11. * published by the Free Software Foundation; either version 2 of
  12. * the License, or (at your option) any later version.
  13. */
  14. #include <linux/atomic.h>
  15. #include <linux/clk.h>
  16. #include <linux/completion.h>
  17. #include <linux/delay.h>
  18. #include <linux/device.h>
  19. #include <linux/dma-mapping.h>
  20. #include <linux/dvb/dmx.h>
  21. #include <linux/dvb/frontend.h>
  22. #include <linux/errno.h>
  23. #include <linux/firmware.h>
  24. #include <linux/init.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/io.h>
  27. #include <linux/module.h>
  28. #include <linux/of_gpio.h>
  29. #include <linux/of_platform.h>
  30. #include <linux/platform_device.h>
  31. #include <linux/usb.h>
  32. #include <linux/slab.h>
  33. #include <linux/time.h>
  34. #include <linux/version.h>
  35. #include <linux/wait.h>
  36. #include <linux/pinctrl/pinctrl.h>
  37. #include "c8sectpfe-core.h"
  38. #include "c8sectpfe-common.h"
  39. #include "c8sectpfe-debugfs.h"
  40. #include "dmxdev.h"
  41. #include "dvb_demux.h"
  42. #include "dvb_frontend.h"
  43. #include "dvb_net.h"
  44. #define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
  45. MODULE_FIRMWARE(FIRMWARE_MEMDMA);
  46. #define PID_TABLE_SIZE 1024
  47. #define POLL_MSECS 50
  48. static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei);
  49. #define TS_PKT_SIZE 188
  50. #define HEADER_SIZE (4)
  51. #define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
  52. #define FEI_ALIGNMENT (32)
  53. /* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
  54. #define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
  55. #define FIFO_LEN 1024
  56. static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
  57. {
  58. struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
  59. struct channel_info *channel;
  60. int chan_num;
  61. /* iterate through input block channels */
  62. for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
  63. channel = fei->channel_data[chan_num];
  64. /* is this descriptor initialised and TP enabled */
  65. if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
  66. tasklet_schedule(&channel->tsklet);
  67. }
  68. fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
  69. add_timer(&fei->timer);
  70. }
  71. static void channel_swdemux_tsklet(unsigned long data)
  72. {
  73. struct channel_info *channel = (struct channel_info *)data;
  74. struct c8sectpfei *fei;
  75. unsigned long wp, rp;
  76. int pos, num_packets, n, size;
  77. u8 *buf;
  78. if (unlikely(!channel || !channel->irec))
  79. return;
  80. fei = channel->fei;
  81. wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
  82. rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
  83. pos = rp - channel->back_buffer_busaddr;
  84. /* has it wrapped */
  85. if (wp < rp)
  86. wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;
  87. size = wp - rp;
  88. num_packets = size / PACKET_SIZE;
  89. /* manage cache so data is visible to CPU */
  90. dma_sync_single_for_cpu(fei->dev,
  91. rp,
  92. size,
  93. DMA_FROM_DEVICE);
  94. buf = (u8 *) channel->back_buffer_aligned;
  95. dev_dbg(fei->dev,
  96. "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\t"
  97. "rp=0x%lx, wp=0x%lx\n",
  98. channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
  99. for (n = 0; n < num_packets; n++) {
  100. dvb_dmx_swfilter_packets(
  101. &fei->c8sectpfe[0]->
  102. demux[channel->demux_mapping].dvb_demux,
  103. &buf[pos], 1);
  104. pos += PACKET_SIZE;
  105. }
  106. /* advance the read pointer */
  107. if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
  108. writel(channel->back_buffer_busaddr, channel->irec +
  109. DMA_PRDS_BUSRP_TP(0));
  110. else
  111. writel(wp, channel->irec + DMA_PRDS_BUSWP_TP(0));
  112. }
  113. static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
  114. {
  115. struct dvb_demux *demux = dvbdmxfeed->demux;
  116. struct stdemux *stdemux = (struct stdemux *)demux->priv;
  117. struct c8sectpfei *fei = stdemux->c8sectpfei;
  118. struct channel_info *channel;
  119. u32 tmp;
  120. unsigned long *bitmap;
  121. switch (dvbdmxfeed->type) {
  122. case DMX_TYPE_TS:
  123. break;
  124. case DMX_TYPE_SEC:
  125. break;
  126. default:
  127. dev_err(fei->dev, "%s:%d Error bailing\n"
  128. , __func__, __LINE__);
  129. return -EINVAL;
  130. }
  131. if (dvbdmxfeed->type == DMX_TYPE_TS) {
  132. switch (dvbdmxfeed->pes_type) {
  133. case DMX_PES_VIDEO:
  134. case DMX_PES_AUDIO:
  135. case DMX_PES_TELETEXT:
  136. case DMX_PES_PCR:
  137. case DMX_PES_OTHER:
  138. break;
  139. default:
  140. dev_err(fei->dev, "%s:%d Error bailing\n"
  141. , __func__, __LINE__);
  142. return -EINVAL;
  143. }
  144. }
  145. if (!atomic_read(&fei->fw_loaded)) {
  146. dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
  147. return -EINVAL;
  148. }
  149. mutex_lock(&fei->lock);
  150. channel = fei->channel_data[stdemux->tsin_index];
  151. bitmap = (unsigned long *) channel->pid_buffer_aligned;
  152. /* 8192 is a special PID */
  153. if (dvbdmxfeed->pid == 8192) {
  154. tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
  155. tmp &= ~C8SECTPFE_PID_ENABLE;
  156. writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
  157. } else {
  158. bitmap_set(bitmap, dvbdmxfeed->pid, 1);
  159. }
  160. /* manage cache so PID bitmap is visible to HW */
  161. dma_sync_single_for_device(fei->dev,
  162. channel->pid_buffer_busaddr,
  163. PID_TABLE_SIZE,
  164. DMA_TO_DEVICE);
  165. channel->active = 1;
  166. if (fei->global_feed_count == 0) {
  167. fei->timer.expires = jiffies +
  168. msecs_to_jiffies(msecs_to_jiffies(POLL_MSECS));
  169. add_timer(&fei->timer);
  170. }
  171. if (stdemux->running_feed_count == 0) {
  172. dev_dbg(fei->dev, "Starting channel=%p\n", channel);
  173. tasklet_init(&channel->tsklet, channel_swdemux_tsklet,
  174. (unsigned long) channel);
  175. /* Reset the internal inputblock sram pointers */
  176. writel(channel->fifo,
  177. fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
  178. writel(channel->fifo + FIFO_LEN - 1,
  179. fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
  180. writel(channel->fifo,
  181. fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
  182. writel(channel->fifo,
  183. fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
  184. /* reset read / write memdma ptrs for this channel */
  185. writel(channel->back_buffer_busaddr, channel->irec +
  186. DMA_PRDS_BUSBASE_TP(0));
  187. tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
  188. writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
  189. writel(channel->back_buffer_busaddr, channel->irec +
  190. DMA_PRDS_BUSWP_TP(0));
  191. /* Issue a reset and enable InputBlock */
  192. writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
  193. , fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
  194. /* and enable the tp */
  195. writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
  196. dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
  197. , __func__, __LINE__, stdemux);
  198. }
  199. stdemux->running_feed_count++;
  200. fei->global_feed_count++;
  201. mutex_unlock(&fei->lock);
  202. return 0;
  203. }
/*
 * c8sectpfe_stop_feed - dvb_demux callback to stop filtering a PID.
 *
 * Removes the PID from the channel's HW filter bitmap (or re-enables HW
 * filtering when stopping the special pass-all PID 8192). When the last
 * feed on a channel stops, the input block and transport processor are
 * shut down following the TP re-configuration sequence from the
 * functional spec; when the last feed globally stops, the poll timer is
 * cancelled.
 */
static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	/* nothing to undo if the slim core firmware never loaded */
	if (!atomic_read(&fei->fw_loaded)) {
		dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = (unsigned long *) channel->pid_buffer_aligned;

	/* 8192 is the special pass-all PID: restore HW PID filtering */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so data is visible to HW */
	dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* TP re-configuration on page 168 of functional spec */

		/* disable IB (prevents more TS data going to memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channels descriptor */
		writel(0, channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;

		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						  msecs_to_jiffies(100));

		/* proceed anyway on timeout; the IB and TP are already off */
		if (ret == 0)
			dev_warn(fei->dev,
				"Timeout waiting for idle irq on tsin%d\n",
				channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset read / write ptrs for this channel */

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* turn off all PIDS in the bitmap */
		memset((void *)channel->pid_buffer_aligned
			, 0x00, PID_TABLE_SIZE);

		/* manage cache so data is visible to HW */
		dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

		channel->active = 0;
	}

	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
			, __func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}
  280. static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
  281. {
  282. int i;
  283. for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
  284. if (!fei->channel_data[i])
  285. continue;
  286. if (fei->channel_data[i]->tsin_id == tsin_num)
  287. return fei->channel_data[i];
  288. }
  289. return NULL;
  290. }
  291. static void c8sectpfe_getconfig(struct c8sectpfei *fei)
  292. {
  293. struct c8sectpfe_hw *hw = &fei->hw_stats;
  294. hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
  295. hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
  296. hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
  297. hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
  298. hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
  299. hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
  300. hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
  301. dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
  302. dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
  303. dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
  304. dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
  305. , hw->num_swts);
  306. dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
  307. dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
  308. dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
  309. dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
  310. , hw->num_tp);
  311. }
  312. static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
  313. {
  314. struct c8sectpfei *fei = priv;
  315. struct channel_info *chan;
  316. int bit;
  317. unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
  318. /* page 168 of functional spec: Clear the idle request
  319. by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */
  320. /* signal idle completion */
  321. for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
  322. chan = find_channel(fei, bit);
  323. if (chan)
  324. complete(&chan->idle_completion);
  325. }
  326. writel(0, fei->io + DMA_IDLE_REQ);
  327. return IRQ_HANDLED;
  328. }
  329. static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
  330. {
  331. if (!fei || !tsin)
  332. return;
  333. if (tsin->back_buffer_busaddr)
  334. if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
  335. dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
  336. FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
  337. kfree(tsin->back_buffer_start);
  338. if (tsin->pid_buffer_busaddr)
  339. if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
  340. dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
  341. PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
  342. kfree(tsin->pid_buffer_start);
  343. }
#define MAX_NAME 20

/*
 * configure_memdma_and_inputblock - one-time HW bring-up for a tsin.
 *
 * Allocates and DMA-maps the channel's circular back buffer and HW PID
 * filter bitmap, selects the tsin pinctrl state, programs the input
 * block (format, sync/drop thresholds, FIFO window, PID filtering) and
 * fills in the channel's memdma pointer record, then arms the software
 * demux tasklet.
 *
 * Returns 0 on success or a negative errno; on any failure everything
 * allocated so far is released via free_input_block().
 */
static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
				struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
		, __func__, __LINE__, tsin, tsin->tsin_id);

	init_completion(&tsin->idle_completion);

	/* over-allocate so the buffer can be aligned to FEI_ALIGNMENT */
	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE +
					FEI_ALIGNMENT, GFP_KERNEL);

	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure backbuffer is 32byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start
		+ FEI_ALIGNMENT;

	tsin->back_buffer_aligned = (void *)
		(((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
					(void *)tsin->back_buffer_aligned,
					FEI_BUFFER_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The pid buffer can be configured (in hw) for byte or bit
	 * per pid. By powers of deduction we conclude stih407 family
	 * is configured (at SoC design stage) for bit per pid.
	 */
	tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);

	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * PID buffer needs to be aligned to size of the pid table
	 * which at bit per pid is 1024 bytes (8192 pids / 8).
	 * PIDF_BASE register enforces this alignment when writing
	 * the register.
	 */

	tsin->pid_buffer_aligned = tsin->pid_buffer_start +
		PID_TABLE_SIZE;

	tsin->pid_buffer_aligned = (void *)
		(((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						tsin->pid_buffer_aligned,
						PID_TABLE_SIZE,
						DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so pid bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	/* pinctrl state name encodes both tsin number and wire format */
	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		(tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
			, __func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);

	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
			, __func__);
		goto err_unmap;
	}

	/* Enable this input block */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	/* build the input format config from the DT-supplied flags */
	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	/* sync/drop thresholds and the TS sync byte (0x47) */
	writel(C8SECTPFE_SYNC(0x9) |
		C8SECTPFE_DROP(0x9) |
		C8SECTPFE_TOKEN(0x47),
		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place the FIFO's at the end of the irec descriptors */

	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	writel(tsin->pid_buffer_busaddr,
		fei->io + PIDF_BASE(tsin->tsin_id));

	dev_info(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		&tsin->pid_buffer_busaddr);

	/* Configure and enable HW PID filtering */

	/*
	 * The PID value is created by assembling the first 8 bytes of
	 * the TS packet into a 64-bit word in big-endian format. A
	 * slice of that 64-bit word is taken from
	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
		| C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get base addpress of pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
			readl(fei->io + DMA_PTRREC_BASE);

	/* fill out pointer record data structure */

	/* advance pointer record block to our channel */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);
	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* packet size rounded up to an 8-byte boundary for the memdma */
	writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);

	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* read/write pointers with physical bus address */

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));

	/* initialize tasklet */
	tasklet_init(&tsin->tsklet, channel_swdemux_tsklet,
		(unsigned long) tsin);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}
  494. static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
  495. {
  496. struct c8sectpfei *fei = priv;
  497. dev_err(fei->dev, "%s: error handling not yet implemented\n"
  498. , __func__);
  499. /*
  500. * TODO FIXME we should detect some error conditions here
  501. * and ideally so something about them!
  502. */
  503. return IRQ_HANDLED;
  504. }
  505. static int c8sectpfe_probe(struct platform_device *pdev)
  506. {
  507. struct device *dev = &pdev->dev;
  508. struct device_node *child, *np = dev->of_node;
  509. struct c8sectpfei *fei;
  510. struct resource *res;
  511. int ret, index = 0;
  512. struct channel_info *tsin;
  513. /* Allocate the c8sectpfei structure */
  514. fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
  515. if (!fei)
  516. return -ENOMEM;
  517. fei->dev = dev;
  518. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
  519. fei->io = devm_ioremap_resource(dev, res);
  520. if (IS_ERR(fei->io))
  521. return PTR_ERR(fei->io);
  522. res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  523. "c8sectpfe-ram");
  524. fei->sram = devm_ioremap_resource(dev, res);
  525. if (IS_ERR(fei->sram))
  526. return PTR_ERR(fei->sram);
  527. fei->sram_size = res->end - res->start;
  528. fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
  529. if (fei->idle_irq < 0) {
  530. dev_err(dev, "Can't get c8sectpfe-idle-irq\n");
  531. return fei->idle_irq;
  532. }
  533. fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
  534. if (fei->error_irq < 0) {
  535. dev_err(dev, "Can't get c8sectpfe-error-irq\n");
  536. return fei->error_irq;
  537. }
  538. platform_set_drvdata(pdev, fei);
  539. fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
  540. if (IS_ERR(fei->c8sectpfeclk)) {
  541. dev_err(dev, "c8sectpfe clk not found\n");
  542. return PTR_ERR(fei->c8sectpfeclk);
  543. }
  544. ret = clk_prepare_enable(fei->c8sectpfeclk);
  545. if (ret) {
  546. dev_err(dev, "Failed to enable c8sectpfe clock\n");
  547. return ret;
  548. }
  549. /* to save power disable all IP's (on by default) */
  550. writel(0, fei->io + SYS_INPUT_CLKEN);
  551. /* Enable memdma clock */
  552. writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);
  553. /* clear internal sram */
  554. memset_io(fei->sram, 0x0, fei->sram_size);
  555. c8sectpfe_getconfig(fei);
  556. ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
  557. 0, "c8sectpfe-idle-irq", fei);
  558. if (ret) {
  559. dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
  560. goto err_clk_disable;
  561. }
  562. ret = devm_request_irq(dev, fei->error_irq,
  563. c8sectpfe_error_irq_handler, 0,
  564. "c8sectpfe-error-irq", fei);
  565. if (ret) {
  566. dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
  567. goto err_clk_disable;
  568. }
  569. fei->tsin_count = of_get_child_count(np);
  570. if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
  571. fei->tsin_count > fei->hw_stats.num_ib) {
  572. dev_err(dev, "More tsin declared than exist on SoC!\n");
  573. ret = -EINVAL;
  574. goto err_clk_disable;
  575. }
  576. fei->pinctrl = devm_pinctrl_get(dev);
  577. if (IS_ERR(fei->pinctrl)) {
  578. dev_err(dev, "Error getting tsin pins\n");
  579. ret = PTR_ERR(fei->pinctrl);
  580. goto err_clk_disable;
  581. }
  582. for_each_child_of_node(np, child) {
  583. struct device_node *i2c_bus;
  584. fei->channel_data[index] = devm_kzalloc(dev,
  585. sizeof(struct channel_info),
  586. GFP_KERNEL);
  587. if (!fei->channel_data[index]) {
  588. ret = -ENOMEM;
  589. goto err_clk_disable;
  590. }
  591. tsin = fei->channel_data[index];
  592. tsin->fei = fei;
  593. ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
  594. if (ret) {
  595. dev_err(&pdev->dev, "No tsin_num found\n");
  596. goto err_clk_disable;
  597. }
  598. /* sanity check value */
  599. if (tsin->tsin_id > fei->hw_stats.num_ib) {
  600. dev_err(&pdev->dev,
  601. "tsin-num %d specified greater than number\n\t"
  602. "of input block hw in SoC! (%d)",
  603. tsin->tsin_id, fei->hw_stats.num_ib);
  604. ret = -EINVAL;
  605. goto err_clk_disable;
  606. }
  607. tsin->invert_ts_clk = of_property_read_bool(child,
  608. "invert-ts-clk");
  609. tsin->serial_not_parallel = of_property_read_bool(child,
  610. "serial-not-parallel");
  611. tsin->async_not_sync = of_property_read_bool(child,
  612. "async-not-sync");
  613. ret = of_property_read_u32(child, "dvb-card",
  614. &tsin->dvb_card);
  615. if (ret) {
  616. dev_err(&pdev->dev, "No dvb-card found\n");
  617. goto err_clk_disable;
  618. }
  619. i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
  620. if (!i2c_bus) {
  621. dev_err(&pdev->dev, "No i2c-bus found\n");
  622. goto err_clk_disable;
  623. }
  624. tsin->i2c_adapter =
  625. of_find_i2c_adapter_by_node(i2c_bus);
  626. if (!tsin->i2c_adapter) {
  627. dev_err(&pdev->dev, "No i2c adapter found\n");
  628. of_node_put(i2c_bus);
  629. goto err_clk_disable;
  630. }
  631. of_node_put(i2c_bus);
  632. tsin->rst_gpio = of_get_named_gpio(child, "rst-gpio", 0);
  633. ret = gpio_is_valid(tsin->rst_gpio);
  634. if (!ret) {
  635. dev_err(dev,
  636. "reset gpio for tsin%d not valid (gpio=%d)\n",
  637. tsin->tsin_id, tsin->rst_gpio);
  638. goto err_clk_disable;
  639. }
  640. ret = devm_gpio_request_one(dev, tsin->rst_gpio,
  641. GPIOF_OUT_INIT_LOW, "NIM reset");
  642. if (ret && ret != -EBUSY) {
  643. dev_err(dev, "Can't request tsin%d reset gpio\n"
  644. , fei->channel_data[index]->tsin_id);
  645. goto err_clk_disable;
  646. }
  647. if (!ret) {
  648. /* toggle reset lines */
  649. gpio_direction_output(tsin->rst_gpio, 0);
  650. usleep_range(3500, 5000);
  651. gpio_direction_output(tsin->rst_gpio, 1);
  652. usleep_range(3000, 5000);
  653. }
  654. tsin->demux_mapping = index;
  655. dev_dbg(fei->dev,
  656. "channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\t"
  657. "serial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
  658. fei->channel_data[index], index,
  659. tsin->tsin_id, tsin->invert_ts_clk,
  660. tsin->serial_not_parallel, tsin->async_not_sync,
  661. tsin->dvb_card);
  662. index++;
  663. }
  664. /* Setup timer interrupt */
  665. init_timer(&fei->timer);
  666. fei->timer.function = c8sectpfe_timer_interrupt;
  667. fei->timer.data = (unsigned long)fei;
  668. mutex_init(&fei->lock);
  669. /* Get the configuration information about the tuners */
  670. ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
  671. (void *)fei,
  672. c8sectpfe_start_feed,
  673. c8sectpfe_stop_feed);
  674. if (ret) {
  675. dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
  676. ret);
  677. goto err_clk_disable;
  678. }
  679. /* ensure all other init has been done before requesting firmware */
  680. ret = load_c8sectpfe_fw_step1(fei);
  681. if (ret) {
  682. dev_err(dev, "Couldn't load slim core firmware\n");
  683. goto err_clk_disable;
  684. }
  685. c8sectpfe_debugfs_init(fei);
  686. return 0;
  687. err_clk_disable:
  688. /* TODO uncomment when upstream has taken a reference on this clk */
  689. /*clk_disable_unprepare(fei->c8sectpfeclk);*/
  690. return ret;
  691. }
/*
 * c8sectpfe_remove() - platform driver remove hook.
 *
 * Tears down everything c8sectpfe_probe() set up: unregisters the demux
 * frontends, releases every configured input block, and stops the memdma
 * SLIM core by clearing DMA_CPU_RUN and gating the internal IP clocks.
 * Always returns 0.
 */
static int c8sectpfe_remove(struct platform_device *pdev)
{
	struct c8sectpfei *fei = platform_get_drvdata(pdev);
	struct channel_info *channel;
	int i;

	/*
	 * The firmware is requested asynchronously (load_c8sectpfe_fw_step1);
	 * block here until its completion callback has signalled fw_ack so we
	 * never tear down state the callback may still be using.
	 */
	wait_for_completion(&fei->fw_ack);

	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);

	/*
	 * Now loop through and un-configure each of the InputBlock resources
	 */
	for (i = 0; i < fei->tsin_count; i++) {
		channel = fei->channel_data[i];
		free_input_block(fei, channel);
	}

	c8sectpfe_debugfs_exit(fei);

	dev_info(fei->dev, "Stopping memdma SLIM core\n");
	/* only poke the run/clock registers if they are actually set */
	if (readl(fei->io + DMA_CPU_RUN))
		writel(0x0, fei->io + DMA_CPU_RUN);

	/* unclock all internal IP's */
	if (readl(fei->io + SYS_INPUT_CLKEN))
		writel(0, fei->io + SYS_INPUT_CLKEN);

	if (readl(fei->io + SYS_OTHER_CLKEN))
		writel(0, fei->io + SYS_OTHER_CLKEN);

	/* TODO uncomment when upstream has taken a reference on this clk */
	/*
	if (fei->c8sectpfeclk)
		clk_disable_unprepare(fei->c8sectpfeclk);
	*/

	return 0;
}
  722. static int configure_channels(struct c8sectpfei *fei)
  723. {
  724. int index = 0, ret;
  725. struct channel_info *tsin;
  726. struct device_node *child, *np = fei->dev->of_node;
  727. /* iterate round each tsin and configure memdma descriptor and IB hw */
  728. for_each_child_of_node(np, child) {
  729. tsin = fei->channel_data[index];
  730. ret = configure_memdma_and_inputblock(fei,
  731. fei->channel_data[index]);
  732. if (ret) {
  733. dev_err(fei->dev,
  734. "configure_memdma_and_inputblock failed\n");
  735. goto err_unmap;
  736. }
  737. index++;
  738. }
  739. return 0;
  740. err_unmap:
  741. for (index = 0; index < fei->tsin_count; index++) {
  742. tsin = fei->channel_data[index];
  743. free_input_block(fei, tsin);
  744. }
  745. return ret;
  746. }
  747. static int
  748. c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
  749. {
  750. struct elf32_hdr *ehdr;
  751. char class;
  752. if (!fw) {
  753. dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
  754. return -EINVAL;
  755. }
  756. if (fw->size < sizeof(struct elf32_hdr)) {
  757. dev_err(fei->dev, "Image is too small\n");
  758. return -EINVAL;
  759. }
  760. ehdr = (struct elf32_hdr *)fw->data;
  761. /* We only support ELF32 at this point */
  762. class = ehdr->e_ident[EI_CLASS];
  763. if (class != ELFCLASS32) {
  764. dev_err(fei->dev, "Unsupported class: %d\n", class);
  765. return -EINVAL;
  766. }
  767. if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
  768. dev_err(fei->dev, "Unsupported firmware endianness\n");
  769. return -EINVAL;
  770. }
  771. if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
  772. dev_err(fei->dev, "Image is too small\n");
  773. return -EINVAL;
  774. }
  775. if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
  776. dev_err(fei->dev, "Image is corrupted (bad magic)\n");
  777. return -EINVAL;
  778. }
  779. /* Check ELF magic */
  780. ehdr = (Elf32_Ehdr *)fw->data;
  781. if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
  782. ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
  783. ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
  784. ehdr->e_ident[EI_MAG3] != ELFMAG3) {
  785. dev_err(fei->dev, "Invalid ELF magic\n");
  786. return -EINVAL;
  787. }
  788. if (ehdr->e_type != ET_EXEC) {
  789. dev_err(fei->dev, "Unsupported ELF header type\n");
  790. return -EINVAL;
  791. }
  792. if (ehdr->e_phoff > fw->size) {
  793. dev_err(fei->dev, "Firmware size is too small\n");
  794. return -EINVAL;
  795. }
  796. return 0;
  797. }
/*
 * load_imem_segment() - copy one ELF LOAD segment into the SLIM core
 * instruction memory (IMEM).
 *
 * @fei:     driver context (used for dev_dbg() only)
 * @phdr:    ELF program header describing the segment
 * @fw:      firmware blob that the segment bytes are read from
 * @dest:    ioremapped IMEM destination address
 * @seg_num: segment index, for the debug trace only
 *
 * The ELF image packs IMEM instructions as 24-bit values; the hardware
 * expects them padded to 32 bits, so a zero byte is written after every
 * third source byte (hence the final size p_memsz + p_memsz/3).
 */
static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dest,
			int seg_num)
{
	const u8 *imem_src = fw->data + phdr->p_offset;
	int i;

	/*
	 * For IMEM segments, the segment contains 24-bit
	 * instructions which must be padded to 32-bit
	 * instructions before being written. The written
	 * segment is padded with NOP instructions.
	 */

	dev_dbg(fei->dev,
		"Loading IMEM segment %d 0x%08x\n\t"
		" (0x%x bytes) -> 0x%p (0x%x bytes)\n", seg_num,
		phdr->p_paddr, phdr->p_filesz,
		dest, phdr->p_memsz + phdr->p_memsz / 3);

	for (i = 0; i < phdr->p_filesz; i++) {

		/*
		 * NOTE(review): imem_src points into fw->data (normal RAM),
		 * yet it is read via readb() with a __force-style cast —
		 * presumably to keep sparse quiet about the mixed copy loop.
		 */
		writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);

		/* Every 3 bytes, add an additional
		 * padding zero in destination */
		if (i % 3 == 2) {
			dest++;
			writeb(0x00, (void __iomem *)dest);
		}

		dest++;
		imem_src++;
	}
}
/*
 * load_dmem_segment() - copy one ELF LOAD segment into the SLIM core
 * data memory (DMEM).
 *
 * @fei:     driver context (used for dev_dbg() only)
 * @phdr:    ELF program header describing the segment
 * @fw:      firmware blob that the segment bytes are read from
 * @dst:     ioremapped DMEM destination address
 * @seg_num: segment index, for the debug trace only
 *
 * DMEM needs no instruction padding: the p_filesz bytes are copied
 * verbatim and the remainder of p_memsz is zero-filled (BSS-style).
 */
static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dst, int seg_num)
{
	/*
	 * For DMEM segments copy the segment data from the ELF
	 * file and pad segment with zeroes
	 */

	dev_dbg(fei->dev,
		"Loading DMEM segment %d 0x%08x\n\t"
		"(0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, phdr->p_filesz,
		dst, phdr->p_memsz);

	/* __force casts: plain memcpy/memset onto an __iomem mapping */
	memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
		phdr->p_filesz);

	memset((void __force *)dst + phdr->p_filesz, 0,
		phdr->p_memsz - phdr->p_filesz);
}
  844. static int load_slim_core_fw(const struct firmware *fw, void *context)
  845. {
  846. struct c8sectpfei *fei = context;
  847. Elf32_Ehdr *ehdr;
  848. Elf32_Phdr *phdr;
  849. u8 __iomem *dst;
  850. int err = 0, i;
  851. if (!fw || !context)
  852. return -EINVAL;
  853. ehdr = (Elf32_Ehdr *)fw->data;
  854. phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
  855. /* go through the available ELF segments */
  856. for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
  857. /* Only consider LOAD segments */
  858. if (phdr->p_type != PT_LOAD)
  859. continue;
  860. /*
  861. * Check segment is contained within the fw->data buffer
  862. */
  863. if (phdr->p_offset + phdr->p_filesz > fw->size) {
  864. dev_err(fei->dev,
  865. "Segment %d is outside of firmware file\n", i);
  866. err = -EINVAL;
  867. break;
  868. }
  869. /*
  870. * MEMDMA IMEM has executable flag set, otherwise load
  871. * this segment into DMEM.
  872. *
  873. */
  874. if (phdr->p_flags & PF_X) {
  875. dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
  876. /*
  877. * The Slim ELF file uses 32-bit word addressing for
  878. * load offsets.
  879. */
  880. dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
  881. load_imem_segment(fei, phdr, fw, dst, i);
  882. } else {
  883. dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
  884. /*
  885. * The Slim ELF file uses 32-bit word addressing for
  886. * load offsets.
  887. */
  888. dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
  889. load_dmem_segment(fei, phdr, fw, dst, i);
  890. }
  891. }
  892. release_firmware(fw);
  893. return err;
  894. }
  895. static void load_c8sectpfe_fw_cb(const struct firmware *fw, void *context)
  896. {
  897. struct c8sectpfei *fei = context;
  898. int err;
  899. err = c8sectpfe_elf_sanity_check(fei, fw);
  900. if (err) {
  901. dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
  902. , err);
  903. goto err;
  904. }
  905. err = load_slim_core_fw(fw, context);
  906. if (err) {
  907. dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
  908. goto err;
  909. }
  910. /* now the firmware is loaded configure the input blocks */
  911. err = configure_channels(fei);
  912. if (err) {
  913. dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
  914. goto err;
  915. }
  916. /*
  917. * STBus target port can access IMEM and DMEM ports
  918. * without waiting for CPU
  919. */
  920. writel(0x1, fei->io + DMA_PER_STBUS_SYNC);
  921. dev_info(fei->dev, "Boot the memdma SLIM core\n");
  922. writel(0x1, fei->io + DMA_CPU_RUN);
  923. atomic_set(&fei->fw_loaded, 1);
  924. err:
  925. complete_all(&fei->fw_ack);
  926. }
  927. static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
  928. {
  929. int err;
  930. dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
  931. init_completion(&fei->fw_ack);
  932. atomic_set(&fei->fw_loaded, 0);
  933. err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
  934. FIRMWARE_MEMDMA, fei->dev, GFP_KERNEL, fei,
  935. load_c8sectpfe_fw_cb);
  936. if (err) {
  937. dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err);
  938. complete_all(&fei->fw_ack);
  939. return err;
  940. }
  941. return 0;
  942. }
/* OF match table: binds this driver to STiH407 c8sectpfe DT nodes */
static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);
/* Platform driver glue: probe/remove plus the OF match table above */
static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = of_match_ptr(c8sectpfe_match),
	},
	.probe = c8sectpfe_probe,
	.remove = c8sectpfe_remove,
};

module_platform_driver(c8sectpfe_driver);

MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
MODULE_LICENSE("GPL");