fmdrv_common.c
/*
 * FM Driver for Connectivity chip of Texas Instruments.
 *
 * This sub-module of FM driver is common for FM RX and TX
 * functionality. This module is responsible for:
 * 1) Forming group of Channel-8 commands to perform particular
 *    functionality (e.g., a frequency set requires more than
 *    one Channel-8 command to be sent to the chip).
 * 2) Sending each Channel-8 command to the chip and reading
 *    response back over Shared Transport.
 * 3) Managing TX and RX Queues and Tasklets.
 * 4) Handling FM Interrupt packet and taking appropriate action.
 * 5) Loading FM firmware to the chip (common, FM TX, and FM RX
 *    firmware files based on mode selection)
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Raja Mani <raja_mani@ti.com>
 * Author: Manjunatha Halli <manjunatha_halli@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include "fmdrv.h"
#include "fmdrv_v4l2.h"
#include "fmdrv_common.h"
#include <linux/ti_wilink_st.h>
#include "fmdrv_rx.h"
#include "fmdrv_tx.h"
/* Region info */
static struct region_info region_configs[] = {
	/* Europe/US */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 87500,	/* 87.5 MHz */
		.top_freq = 108000,	/* 108 MHz */
		.fm_band = 0,
	},
	/* Japan */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 76000,	/* 76 MHz */
		.top_freq = 90000,	/* 90 MHz */
		.fm_band = 1,
	},
};

/* Band selection */
static u8 default_radio_region;	/* Europe/US */
module_param(default_radio_region, byte, 0);
MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");

/* RDS buffer blocks */
static u32 default_rds_buf = 300;
module_param(default_rds_buf, uint, 0444);
MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");

/* Radio Nr */
static int radio_nr = -1;
module_param(radio_nr, int, 0444);
MODULE_PARM_DESC(radio_nr, "Radio Nr");
/* FM irq handlers forward declaration */
static void fm_irq_send_flag_getcmd(struct fmdev *);
static void fm_irq_handle_flag_getcmd_resp(struct fmdev *);
static void fm_irq_handle_hw_malfunction(struct fmdev *);
static void fm_irq_handle_rds_start(struct fmdev *);
static void fm_irq_send_rdsdata_getcmd(struct fmdev *);
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *);
static void fm_irq_handle_rds_finish(struct fmdev *);
static void fm_irq_handle_tune_op_ended(struct fmdev *);
static void fm_irq_handle_power_enb(struct fmdev *);
static void fm_irq_handle_low_rssi_start(struct fmdev *);
static void fm_irq_afjump_set_pi(struct fmdev *);
static void fm_irq_handle_set_pi_resp(struct fmdev *);
static void fm_irq_afjump_set_pimask(struct fmdev *);
static void fm_irq_handle_set_pimask_resp(struct fmdev *);
static void fm_irq_afjump_setfreq(struct fmdev *);
static void fm_irq_handle_setfreq_resp(struct fmdev *);
static void fm_irq_afjump_enableint(struct fmdev *);
static void fm_irq_afjump_enableint_resp(struct fmdev *);
static void fm_irq_start_afjump(struct fmdev *);
static void fm_irq_handle_start_afjump_resp(struct fmdev *);
static void fm_irq_afjump_rd_freq(struct fmdev *);
static void fm_irq_afjump_rd_freq_resp(struct fmdev *);
static void fm_irq_handle_low_rssi_finish(struct fmdev *);
static void fm_irq_send_intmsk_cmd(struct fmdev *);
static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *);
/*
 * When the FM common module receives an interrupt packet, the following
 * handlers are executed one after another to service the interrupt(s).
 */
enum fmc_irq_handler_index {
	FM_SEND_FLAG_GETCMD_IDX,
	FM_HANDLE_FLAG_GETCMD_RESP_IDX,

	/* HW malfunction irq handler */
	FM_HW_MAL_FUNC_IDX,

	/* RDS threshold reached irq handler */
	FM_RDS_START_IDX,
	FM_RDS_SEND_RDS_GETCMD_IDX,
	FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX,
	FM_RDS_FINISH_IDX,

	/* Tune operation ended irq handler */
	FM_HW_TUNE_OP_ENDED_IDX,

	/* TX power enable irq handler */
	FM_HW_POWER_ENB_IDX,

	/* Low RSSI irq handler */
	FM_LOW_RSSI_START_IDX,
	FM_AF_JUMP_SETPI_IDX,
	FM_AF_JUMP_HANDLE_SETPI_RESP_IDX,
	FM_AF_JUMP_SETPI_MASK_IDX,
	FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX,
	FM_AF_JUMP_SET_AF_FREQ_IDX,
	FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX,
	FM_AF_JUMP_ENABLE_INT_IDX,
	FM_AF_JUMP_ENABLE_INT_RESP_IDX,
	FM_AF_JUMP_START_AFJUMP_IDX,
	FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX,
	FM_AF_JUMP_RD_FREQ_IDX,
	FM_AF_JUMP_RD_FREQ_RESP_IDX,
	FM_LOW_RSSI_FINISH_IDX,

	/* Interrupt process post action */
	FM_SEND_INTMSK_CMD_IDX,
	FM_HANDLE_INTMSK_CMD_RESP_IDX,
};
/* FM interrupt handler table */
static int_handler_prototype int_handler_table[] = {
	fm_irq_send_flag_getcmd,
	fm_irq_handle_flag_getcmd_resp,
	fm_irq_handle_hw_malfunction,
	fm_irq_handle_rds_start, /* RDS threshold reached irq handler */
	fm_irq_send_rdsdata_getcmd,
	fm_irq_handle_rdsdata_getcmd_resp,
	fm_irq_handle_rds_finish,
	fm_irq_handle_tune_op_ended,
	fm_irq_handle_power_enb, /* TX power enable irq handler */
	fm_irq_handle_low_rssi_start,
	fm_irq_afjump_set_pi,
	fm_irq_handle_set_pi_resp,
	fm_irq_afjump_set_pimask,
	fm_irq_handle_set_pimask_resp,
	fm_irq_afjump_setfreq,
	fm_irq_handle_setfreq_resp,
	fm_irq_afjump_enableint,
	fm_irq_afjump_enableint_resp,
	fm_irq_start_afjump,
	fm_irq_handle_start_afjump_resp,
	fm_irq_afjump_rd_freq,
	fm_irq_afjump_rd_freq_resp,
	fm_irq_handle_low_rssi_finish,
	fm_irq_send_intmsk_cmd, /* Interrupt process post action */
	fm_irq_handle_intmsk_cmd_resp
};
static long (*g_st_write) (struct sk_buff *skb);
static struct completion wait_for_fmdrv_reg_comp;

static inline void fm_irq_call(struct fmdev *fmdev)
{
	fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
}

/* Continue next function in interrupt handler table */
static inline void fm_irq_call_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	fm_irq_call(fmdev);
}

static inline void fm_irq_timeout_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
}
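
/*
 * Illustrative sketch only (compiled out): the stage handlers further below
 * all follow the same pattern built on the helpers above. A "send" stage
 * queues one Channel-8 command with fm_send_cmd() and arms the timeout timer
 * for the response stage via fm_irq_timeout_stage(); the matching "resp"
 * stage validates the response with check_cmdresp_status() and advances down
 * int_handler_table with fm_irq_call_stage(). The two functions here are
 * only examples of that pattern, not additional driver stages.
 */
#if 0
static void example_send_stage(struct fmdev *fmdev)
{
	u16 flag;

	/* Queue a read command; on success wait for the response stage */
	if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
}

static void example_resp_stage(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	/* Validate the response, then continue with the next handler */
	if (!check_cmdresp_status(fmdev, &skb))
		fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
}
#endif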
#ifdef FM_DUMP_TXRX_PKT
/* To dump outgoing FM Channel-8 packets */
inline void dump_tx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_cmd_msg_hdr *cmd_hdr;

	cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
	printk(KERN_INFO "<<%shdr:%02x len:%02x opcode:%02x type:%s dlen:%02x",
	       fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
	       cmd_hdr->len, cmd_hdr->op,
	       cmd_hdr->rd_wr ? "RD" : "WR", cmd_hdr->dlen);

	len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk("\n data(%d): ", cmd_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk("%x ",
			       skb->data[FM_CMD_MSG_HDR_SIZE + index]);
		printk("%s", (len_org > 14) ? ".." : "");
	}
	printk("\n");
}

/* To dump incoming FM Channel-8 packets */
inline void dump_rx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_event_msg_hdr *evt_hdr;

	evt_hdr = (struct fm_event_msg_hdr *)skb->data;
	printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x "
	       "opcode:%02x type:%s dlen:%02x", evt_hdr->hdr, evt_hdr->len,
	       evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
	       (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);

	len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk("\n data(%d): ", evt_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk("%x ",
			       skb->data[FM_EVT_MSG_HDR_SIZE + index]);
		printk("%s", (len_org > 14) ? ".." : "");
	}
	printk("\n");
}
#endif
void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
{
	fmdev->rx.region = region_configs[region_to_set];
}

/*
 * The FM common sub-module schedules this tasklet whenever it receives
 * an FM packet from the ST driver.
 */
static void recv_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct fm_irq *irq_info;
	struct fm_event_msg_hdr *evt_hdr;
	struct sk_buff *skb;
	u8 num_fm_hci_cmds;
	unsigned long flags;

	fmdev = (struct fmdev *)arg;
	irq_info = &fmdev->irq_info;
	/* Process all packets in the RX queue */
	while ((skb = skb_dequeue(&fmdev->rx_q))) {
		if (skb->len < sizeof(struct fm_event_msg_hdr)) {
			fmerr("skb(%p) has only %d bytes, "
				"at least need %zu bytes to decode\n", skb,
				skb->len, sizeof(struct fm_event_msg_hdr));
			kfree_skb(skb);
			continue;
		}

		evt_hdr = (void *)skb->data;
		num_fm_hci_cmds = evt_hdr->num_fm_hci_cmds;

		/* FM interrupt packet? */
		if (evt_hdr->op == FM_INTERRUPT) {
			/* FM interrupt handler started already? */
			if (!test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
				set_bit(FM_INTTASK_RUNNING, &fmdev->flag);
				if (irq_info->stage != 0) {
					fmerr("Inval stage resetting to zero\n");
					irq_info->stage = 0;
				}

				/*
				 * Execute first function in interrupt handler
				 * table.
				 */
				irq_info->handlers[irq_info->stage](fmdev);
			} else {
				set_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag);
			}
			kfree_skb(skb);
		}
		/* Anyone waiting for this with completion handler? */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp != NULL) {
			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
			complete(fmdev->resp_comp);

			fmdev->resp_comp = NULL;
			atomic_set(&fmdev->tx_cnt, 1);
		}
		/* Is this for interrupt handler? */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp == NULL) {
			if (fmdev->resp_skb != NULL)
				fmerr("Response SKB ptr not NULL\n");

			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

			/* Execute interrupt handler where state index points */
			irq_info->handlers[irq_info->stage](fmdev);

			kfree_skb(skb);
			atomic_set(&fmdev->tx_cnt, 1);
		} else {
			fmerr("Nobody claimed SKB(%p),purging\n", skb);
		}

		/*
		 * Check flow control field. If Num_FM_HCI_Commands field is
		 * not zero, schedule FM TX tasklet.
		 */
		if (num_fm_hci_cmds && atomic_read(&fmdev->tx_cnt))
			if (!skb_queue_empty(&fmdev->tx_q))
				tasklet_schedule(&fmdev->tx_task);
	}
}
/* FM send tasklet: scheduled whenever an FM packet has to be sent to the chip */
static void send_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct sk_buff *skb;
	int len;

	fmdev = (struct fmdev *)arg;

	if (!atomic_read(&fmdev->tx_cnt))
		return;

	/* Check whether a timeout happened for the last transmitted packet */
	if ((jiffies - fmdev->last_tx_jiffies) > FM_DRV_TX_TIMEOUT) {
		fmerr("TX timeout occurred\n");
		atomic_set(&fmdev->tx_cnt, 1);
	}

	/* Send queued FM TX packets */
	skb = skb_dequeue(&fmdev->tx_q);
	if (!skb)
		return;

	atomic_dec(&fmdev->tx_cnt);
	fmdev->pre_op = fm_cb(skb)->fm_op;

	if (fmdev->resp_comp != NULL)
		fmerr("Response completion handler is not NULL\n");

	fmdev->resp_comp = fm_cb(skb)->completion;

	/* Write FM packet to ST driver */
	len = g_st_write(skb);
	if (len < 0) {
		kfree_skb(skb);
		fmdev->resp_comp = NULL;
		fmerr("TX tasklet failed to send skb(%p)\n", skb);
		atomic_set(&fmdev->tx_cnt, 1);
	} else {
		fmdev->last_tx_jiffies = jiffies;
	}
}
/*
 * Queues FM Channel-8 packet to FM TX queue and schedules FM TX tasklet for
 * transmission
 */
static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		int payload_len, struct completion *wait_completion)
{
	struct sk_buff *skb;
	struct fm_cmd_msg_hdr *hdr;
	int size;

	if (fm_op >= FM_INTERRUPT) {
		fmerr("Invalid fm opcode - %d\n", fm_op);
		return -EINVAL;
	}
	if (test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) && payload == NULL) {
		fmerr("Payload data is NULL during fw download\n");
		return -EINVAL;
	}
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag))
		size =
		    FM_CMD_MSG_HDR_SIZE + ((payload == NULL) ? 0 : payload_len);
	else
		size = payload_len;

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		fmerr("No memory to create new SKB\n");
		return -ENOMEM;
	}
	/*
	 * Don't fill FM header info for the commands which come from
	 * FM firmware file.
	 */
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
	    test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
		/* Fill command header info */
		hdr = (struct fm_cmd_msg_hdr *)skb_put(skb, FM_CMD_MSG_HDR_SIZE);
		hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER;	/* 0x08 */

		/* 3 (fm_opcode, rd_wr, dlen) + payload len */
		hdr->len = ((payload == NULL) ? 0 : payload_len) + 3;

		/* FM opcode */
		hdr->op = fm_op;

		/* read/write type */
		hdr->rd_wr = type;
		hdr->dlen = payload_len;
		fm_cb(skb)->fm_op = fm_op;

		/*
		 * If firmware download has finished and the command is
		 * not a read command then payload is != NULL - a write
		 * command with u16 payload - convert to be16
		 */
		if (payload != NULL)
			*(__be16 *)payload = cpu_to_be16(*(u16 *)payload);

	} else if (payload != NULL) {
		fm_cb(skb)->fm_op = *((u8 *)payload + 2);
	}
	if (payload != NULL)
		memcpy(skb_put(skb, payload_len), payload, payload_len);

	fm_cb(skb)->completion = wait_completion;
	skb_queue_tail(&fmdev->tx_q, skb);
	tasklet_schedule(&fmdev->tx_task);

	return 0;
}
/* Sends FM Channel-8 command to the chip and waits for the response */
int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		unsigned int payload_len, void *response, int *response_len)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *evt_hdr;
	unsigned long flags;
	int ret;

	init_completion(&fmdev->maintask_comp);
	ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
			    &fmdev->maintask_comp);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&fmdev->maintask_comp,
					 FM_DRV_TX_TIMEOUT)) {
		fmerr("Timeout(%d sec), didn't get reg "
			"completion signal from RX tasklet\n",
			jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
		return -ETIMEDOUT;
	}
	if (!fmdev->resp_skb) {
		fmerr("Response SKB is missing\n");
		return -EFAULT;
	}
	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	evt_hdr = (void *)skb->data;
	if (evt_hdr->status != 0) {
		fmerr("Received event pkt status(%d) is not zero\n",
				evt_hdr->status);
		kfree_skb(skb);
		return -EIO;
	}
	/* Send response data to caller */
	if (response != NULL && response_len != NULL && evt_hdr->dlen) {
		/* Skip header info and copy only response data */
		skb_pull(skb, sizeof(struct fm_event_msg_hdr));
		memcpy(response, skb->data, evt_hdr->dlen);
		*response_len = evt_hdr->dlen;
	} else if (response_len != NULL && evt_hdr->dlen == 0) {
		*response_len = 0;
	}
	kfree_skb(skb);

	return 0;
}
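
/*
 * Illustrative sketch only (compiled out): a typical synchronous caller of
 * fmc_send_cmd() either writes a u16 payload (converted to be16 inside
 * fm_send_cmd()) or reads a be16 register back, exactly as fm_power_up()
 * below does for ASIC_ID_GET. The helper name is an example, not part of
 * the driver.
 */
#if 0
static int example_read_asic_id(struct fmdev *fmdev, u16 *id)
{
	__be16 asic_id;
	int resp_len, ret;

	ret = fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
			sizeof(asic_id), &asic_id, &resp_len);
	if (ret < 0)
		return ret;

	*id = be16_to_cpu(asic_id);
	return 0;
}
#endif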
/* --- Helper functions used in FM interrupt handlers --- */
static inline int check_cmdresp_status(struct fmdev *fmdev,
		struct sk_buff **skb)
{
	struct fm_event_msg_hdr *fm_evt_hdr;
	unsigned long flags;

	del_timer(&fmdev->irq_info.timer);

	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	*skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	fm_evt_hdr = (void *)(*skb)->data;
	if (fm_evt_hdr->status != 0) {
		fmerr("irq: opcode %x response status is not zero "
				"Initiating irq recovery process\n",
				fm_evt_hdr->op);

		mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
		return -1;
	}

	return 0;
}

static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
{
	struct sk_buff *skb;

	if (!check_cmdresp_status(fmdev, &skb))
		fm_irq_call_stage(fmdev, stage);
}

/*
 * Interrupt process timeout handler.
 * One of the irq handlers did not get a proper response from the chip, so
 * take recovery action here. FM interrupts are disabled at the beginning of
 * interrupt processing, therefore reset the stage index to re-enable the
 * default interrupts, so that the next interrupt is processed as usual.
 */
static void int_timeout_handler(unsigned long data)
{
	struct fmdev *fmdev;
	struct fm_irq *fmirq;

	fmdbg("irq: timeout,trying to re-enable fm interrupts\n");
	fmdev = (struct fmdev *)data;
	fmirq = &fmdev->irq_info;
	fmirq->retry++;

	if (fmirq->retry > FM_IRQ_TIMEOUT_RETRY_MAX) {
		/* Stop recovery action (interrupt reenable process) and
		 * reset stage index & retry count values */
		fmirq->stage = 0;
		fmirq->retry = 0;
		fmerr("Recovery action failed during "
				"irq processing, max retry reached\n");
		return;
	}
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}
/* --------- FM interrupt handlers ------------ */
static void fm_irq_send_flag_getcmd(struct fmdev *fmdev)
{
	u16 flag;

	/* Send FLAG_GET command, to know the source of interrupt */
	if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
}

static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *fm_evt_hdr;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fm_evt_hdr = (void *)skb->data;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);

	fmdev->irq_info.flag = be16_to_cpu((__force __be16)fmdev->irq_info.flag);
	fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);

	/* Continue next function in interrupt handler table */
	fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
}

static void fm_irq_handle_hw_malfunction(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_MAL_EVENT & fmdev->irq_info.mask)
		fmerr("irq: HW MAL int received - do nothing\n");

	/* Continue next function in interrupt handler table */
	fm_irq_call_stage(fmdev, FM_RDS_START_IDX);
}

static void fm_irq_handle_rds_start(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_RDS_EVENT & fmdev->irq_info.mask) {
		fmdbg("irq: rds threshold reached\n");
		fmdev->irq_info.stage = FM_RDS_SEND_RDS_GETCMD_IDX;
	} else {
		/* Continue next function in interrupt handler table */
		fmdev->irq_info.stage = FM_HW_TUNE_OP_ENDED_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_send_rdsdata_getcmd(struct fmdev *fmdev)
{
	/* Send the command to read RDS data from the chip */
	if (!fm_send_cmd(fmdev, RDS_DATA_GET, REG_RD, NULL,
			(FM_RX_RDS_FIFO_THRESHOLD * 3), NULL))
		fm_irq_timeout_stage(fmdev, FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX);
}
/* Keeps track of current RX channel AF (Alternate Frequency) */
static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
{
	struct tuned_station_info *stat_info = &fmdev->rx.stat_info;
	u8 reg_idx = fmdev->rx.region.fm_band;
	u8 index;
	u32 freq;

	/* The first AF indicates the number of AFs that follow. Reset the list */
	if ((af >= FM_RDS_1_AF_FOLLOWS) && (af <= FM_RDS_25_AF_FOLLOWS)) {
		fmdev->rx.stat_info.af_list_max = (af - FM_RDS_1_AF_FOLLOWS + 1);
		fmdev->rx.stat_info.afcache_size = 0;
		fmdbg("No of expected AF : %d\n", fmdev->rx.stat_info.af_list_max);
		return;
	}

	if (af < FM_RDS_MIN_AF)
		return;
	if (reg_idx == FM_BAND_EUROPE_US && af > FM_RDS_MAX_AF)
		return;
	if (reg_idx == FM_BAND_JAPAN && af > FM_RDS_MAX_AF_JAPAN)
		return;

	freq = fmdev->rx.region.bot_freq + (af * 100);
	if (freq == fmdev->rx.freq) {
		fmdbg("Current freq(%d) is matching with received AF(%d)\n",
				fmdev->rx.freq, freq);
		return;
	}
	/* Do check in AF cache */
	for (index = 0; index < stat_info->afcache_size; index++) {
		if (stat_info->af_cache[index] == freq)
			break;
	}
	/* Reached the limit of the list - ignore the next AF */
	if (index == stat_info->af_list_max) {
		fmdbg("AF cache is full\n");
		return;
	}
	/*
	 * If we reached the end of the list then this AF is not
	 * in the list - add it.
	 */
	if (index == stat_info->afcache_size) {
		fmdbg("Storing AF %d to cache index %d\n", freq, index);
		stat_info->af_cache[index] = freq;
		stat_info->afcache_size++;
	}
}
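
/*
 * Worked example of the AF mapping above (illustrative only): with the
 * Europe/US bot_freq of 87500 kHz, an AF code of 23 maps to
 * 87500 + 23 * 100 = 89800 kHz (89.8 MHz); that value is what ends up in
 * the AF cache and is later compared against the tuned frequency during
 * an AF jump.
 */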
/*
 * Converts RDS buffer data from big endian format
 * to little endian format.
 */
static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
		struct fm_rdsdata_format *rds_format)
{
	u8 index = 0;
	u8 *rds_buff;

	/*
	 * Since in Orca the 2 RDS Data bytes are in little endian and
	 * in Dolphin they are in big endian, the parsing of the RDS data
	 * is chip dependent
	 */
	if (fmdev->asci_id != 0x6350) {
		rds_buff = &rds_format->data.groupdatabuff.buff[0];
		while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
			swap(rds_buff[index], rds_buff[index + 1]);
			index += 2;
		}
	}
}
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_rdsdata_format rds_fmt;
	struct fm_rds *rds = &fmdev->rx.rds;
	unsigned long group_idx, flags;
	u8 *rds_data, meta_data, tmpbuf[FM_RDS_BLK_SIZE];
	u8 type, blk_idx;
	u16 cur_picode;
	u32 rds_len;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	rds_data = skb->data;
	rds_len = skb->len;

	/* Parse the RDS data */
	while (rds_len >= FM_RDS_BLK_SIZE) {
		meta_data = rds_data[2];
		/* Get the type: 0=A, 1=B, 2=C, 3=C', 4=D, 5=E */
		type = (meta_data & 0x07);

		/* Transform the blk type into index sequence (0, 1, 2, 3, 4) */
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		fmdbg("Block index:%d(%s)\n", blk_idx,
			(meta_data & FM_RDS_STATUS_ERR_MASK) ? "Bad" : "Ok");

		if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
			break;

		if (blk_idx > FM_RDS_BLK_IDX_D) {
			fmdbg("Block sequence mismatch\n");
			rds->last_blk_idx = -1;
			break;
		}

		/* Skip checkword (control) byte and copy only data byte */
		memcpy(&rds_fmt.data.groupdatabuff.
				buff[blk_idx * (FM_RDS_BLK_SIZE - 1)],
				rds_data, (FM_RDS_BLK_SIZE - 1));

		rds->last_blk_idx = blk_idx;

		/* If completed a whole group then handle it */
		if (blk_idx == FM_RDS_BLK_IDX_D) {
			fmdbg("Good block received\n");
			fm_rdsparse_swapbytes(fmdev, &rds_fmt);

			/*
			 * Extract PI code and store in local cache.
			 * We need this during AF switch processing.
			 */
			cur_picode = be16_to_cpu((__force __be16)rds_fmt.data.groupgeneral.pidata);
			if (fmdev->rx.stat_info.picode != cur_picode)
				fmdev->rx.stat_info.picode = cur_picode;

			fmdbg("picode:%d\n", cur_picode);

			group_idx = (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			fmdbg("(fmdrv):Group:%ld%s\n", group_idx/2,
					(group_idx % 2) ? "B" : "A");

			group_idx = 1 << (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			if (group_idx == FM_RDS_GROUP_TYPE_MASK_0A) {
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[0]);
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[1]);
			}
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}

	/* Copy raw rds data to internal rds buffer */
	rds_data = skb->data;
	rds_len = skb->len;

	spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
	while (rds_len > 0) {
		/*
		 * Fill RDS buffer as per V4L2 specification.
		 * Store control byte
		 */
		type = (rds_data[2] & 0x07);
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		tmpbuf[2] = blk_idx;	/* Offset name */
		tmpbuf[2] |= blk_idx << 3;	/* Received offset */

		/* Store data byte */
		tmpbuf[0] = rds_data[0];
		tmpbuf[1] = rds_data[1];

		memcpy(&rds->buff[rds->wr_idx], &tmpbuf, FM_RDS_BLK_SIZE);
		rds->wr_idx = (rds->wr_idx + FM_RDS_BLK_SIZE) % rds->buf_size;

		/* Check for overflow & start over */
		if (rds->wr_idx == rds->rd_idx) {
			fmdbg("RDS buffer overflow\n");
			rds->wr_idx = 0;
			rds->rd_idx = 0;
			break;
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}
	spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

	/* Wakeup read queue */
	if (rds->wr_idx != rds->rd_idx)
		wake_up_interruptible(&rds->read_queue);

	fm_irq_call_stage(fmdev, FM_RDS_FINISH_IDX);
}
static void fm_irq_handle_rds_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_HW_TUNE_OP_ENDED_IDX);
}

static void fm_irq_handle_tune_op_ended(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & (FM_FR_EVENT | FM_BL_EVENT) & fmdev->
	    irq_info.mask) {
		fmdbg("irq: tune ended/bandlimit reached\n");
		if (test_and_clear_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag)) {
			fmdev->irq_info.stage = FM_AF_JUMP_RD_FREQ_IDX;
		} else {
			complete(&fmdev->maintask_comp);
			fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
		}
	} else
		fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;

	fm_irq_call(fmdev);
}

static void fm_irq_handle_power_enb(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_POW_ENB_EVENT) {
		fmdbg("irq: Power Enabled/Disabled\n");
		complete(&fmdev->maintask_comp);
	}

	fm_irq_call_stage(fmdev, FM_LOW_RSSI_START_IDX);
}

static void fm_irq_handle_low_rssi_start(struct fmdev *fmdev)
{
	if ((fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON) &&
	    (fmdev->irq_info.flag & FM_LEV_EVENT & fmdev->irq_info.mask) &&
	    (fmdev->rx.freq != FM_UNDEFINED_FREQ) &&
	    (fmdev->rx.stat_info.afcache_size != 0)) {
		fmdbg("irq: rssi level has fallen below threshold level\n");

		/* Disable further low RSSI interrupts */
		fmdev->irq_info.mask &= ~FM_LEV_EVENT;

		fmdev->rx.afjump_idx = 0;
		fmdev->rx.freq_before_jump = fmdev->rx.freq;
		fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
	} else {
		/* Continue next function in interrupt handler table */
		fmdev->irq_info.stage = FM_SEND_INTMSK_CMD_IDX;
	}

	fm_irq_call(fmdev);
}
static void fm_irq_afjump_set_pi(struct fmdev *fmdev)
{
	u16 payload;

	/* Set PI code - must be updated if the AF list is not empty */
	payload = fmdev->rx.stat_info.picode;
	if (!fm_send_cmd(fmdev, RDS_PI_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_RESP_IDX);
}

static void fm_irq_handle_set_pi_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SETPI_MASK_IDX);
}

/*
 * Set PI mask.
 * 0xFFFF = Enable PI code matching
 * 0x0000 = Disable PI code matching
 */
static void fm_irq_afjump_set_pimask(struct fmdev *fmdev)
{
	u16 payload;

	payload = 0x0000;
	if (!fm_send_cmd(fmdev, RDS_PI_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX);
}

static void fm_irq_handle_set_pimask_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SET_AF_FREQ_IDX);
}

static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
{
	u16 frq_index;
	u16 payload;

	fmdbg("Switch to %d kHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
	frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
			fmdev->rx.region.bot_freq) / FM_FREQ_MUL;

	payload = frq_index;
	if (!fm_send_cmd(fmdev, AF_FREQ_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX);
}

static void fm_irq_handle_setfreq_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_ENABLE_INT_IDX);
}

static void fm_irq_afjump_enableint(struct fmdev *fmdev)
{
	u16 payload;

	/* Enable FR (tuning operation ended) interrupt */
	payload = FM_FR_EVENT;
	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_ENABLE_INT_RESP_IDX);
}

static void fm_irq_afjump_enableint_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_START_AFJUMP_IDX);
}

static void fm_irq_start_afjump(struct fmdev *fmdev)
{
	u16 payload;

	payload = FM_TUNER_AF_JUMP_MODE;
	if (!fm_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
			sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX);
}

static void fm_irq_handle_start_afjump_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
	set_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag);
	clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}
static void fm_irq_afjump_rd_freq(struct fmdev *fmdev)
{
	u16 payload;

	if (!fm_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_RD_FREQ_RESP_IDX);
}

static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	u16 read_freq;
	u32 curr_freq, jumped_freq;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&read_freq, skb->data, sizeof(read_freq));
	read_freq = be16_to_cpu((__force __be16)read_freq);
	curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);

	jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];

	/* If the frequency was changed the jump succeeded */
	if ((curr_freq != fmdev->rx.freq_before_jump) && (curr_freq == jumped_freq)) {
		fmdbg("Successfully switched to alternate freq %d\n", curr_freq);
		fmdev->rx.freq = curr_freq;
		fm_rx_reset_rds_cache(fmdev);

		/* AF feature is on, enable low level RSSI interrupt */
		if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
			fmdev->irq_info.mask |= FM_LEV_EVENT;

		fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
	} else {		/* jump to the next freq in the AF list */
		fmdev->rx.afjump_idx++;

		/* If we reached the end of the list - stop searching */
		if (fmdev->rx.afjump_idx >= fmdev->rx.stat_info.afcache_size) {
			fmdbg("AF switch processing failed\n");
			fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
		} else {	/* AF List is not over - try next one */
			fmdbg("Trying next freq in AF cache\n");
			fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
		}
	}
	fm_irq_call(fmdev);
}

static void fm_irq_handle_low_rssi_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

static void fm_irq_send_intmsk_cmd(struct fmdev *fmdev)
{
	u16 payload;

	/* Re-enable FM interrupts */
	payload = fmdev->irq_info.mask;

	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_INTMSK_CMD_RESP_IDX);
}

static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;
	/*
	 * This is last function in interrupt table to be executed.
	 * So, reset stage index to 0.
	 */
	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;

	/* Start processing any pending interrupt */
	if (test_and_clear_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag))
		fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
	else
		clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}
/* Returns availability of RDS data in internal buffer */
int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
				struct poll_table_struct *pts)
{
	poll_wait(file, &fmdev->rx.rds.read_queue, pts);
	if (fmdev->rx.rds.rd_idx != fmdev->rx.rds.wr_idx)
		return 0;

	return -EAGAIN;
}

/* Copies RDS data from internal buffer to user buffer */
int fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
		u8 __user *buf, size_t count)
{
	u32 block_count;
	u8 tmpbuf[FM_RDS_BLK_SIZE];
	unsigned long flags;
	int ret;

	if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		ret = wait_event_interruptible(fmdev->rx.rds.read_queue,
				(fmdev->rx.rds.wr_idx != fmdev->rx.rds.rd_idx));
		if (ret)
			return -EINTR;
	}

	/* Calculate block count from byte count */
	count /= FM_RDS_BLK_SIZE;
	block_count = 0;
	ret = 0;

	while (block_count < count) {
		spin_lock_irqsave(&fmdev->rds_buff_lock, flags);

		if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
			spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
			break;
		}
		memcpy(tmpbuf, &fmdev->rx.rds.buff[fmdev->rx.rds.rd_idx],
					FM_RDS_BLK_SIZE);
		fmdev->rx.rds.rd_idx += FM_RDS_BLK_SIZE;
		if (fmdev->rx.rds.rd_idx >= fmdev->rx.rds.buf_size)
			fmdev->rx.rds.rd_idx = 0;

		spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

		if (copy_to_user(buf, tmpbuf, FM_RDS_BLK_SIZE))
			break;

		block_count++;
		buf += FM_RDS_BLK_SIZE;
		ret += FM_RDS_BLK_SIZE;
	}
	return ret;
}
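
/*
 * Illustrative sketch only (compiled out, user-space code): a minimal reader
 * for the RDS stream exported above. Each read() on the radio device returns
 * whole 3-byte RDS blocks - two data bytes plus the block-index byte filled
 * in by fm_irq_handle_rdsdata_getcmd_resp(). The device node name is an
 * assumption and error handling is kept minimal.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char blk[3];
	int fd = open("/dev/radio0", O_RDONLY);

	if (fd < 0)
		return 1;
	while (read(fd, blk, sizeof(blk)) == (ssize_t)sizeof(blk))
		printf("block %u: %02x %02x\n", blk[2] & 0x07, blk[0], blk[1]);
	close(fd);
	return 0;
}
#endif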
int fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_freq(fmdev, freq_to_set);

	case FM_MODE_TX:
		return fm_tx_set_freq(fmdev, freq_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
{
	if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
		fmerr("RX frequency is not set\n");
		return -EPERM;
	}
	if (cur_tuned_frq == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		*cur_tuned_frq = fmdev->rx.freq;
		return 0;

	case FM_MODE_TX:
		*cur_tuned_frq = 0;	/* TODO : Change this later */
		return 0;

	default:
		return -EINVAL;
	}
}

int fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_region(fmdev, region_to_set);

	case FM_MODE_TX:
		return fm_tx_set_region(fmdev, region_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_mute_mode(fmdev, mute_mode_toset);

	case FM_MODE_TX:
		return fm_tx_set_mute_mode(fmdev, mute_mode_toset);

	default:
		return -EINVAL;
	}
}

int fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_stereo_mono(fmdev, mode);

	case FM_MODE_TX:
		return fm_tx_set_stereo_mono(fmdev, mode);

	default:
		return -EINVAL;
	}
}

int fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_rds_mode(fmdev, rds_en_dis);

	case FM_MODE_TX:
		return fm_tx_set_rds_mode(fmdev, rds_en_dis);

	default:
		return -EINVAL;
	}
}
/* Sends power off command to the chip */
static int fm_power_down(struct fmdev *fmdev)
{
	u16 payload;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmdev->curr_fmmode == FM_MODE_OFF) {
		fmdbg("FM chip is already in OFF state\n");
		return 0;
	}

	payload = 0x0;
	ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
		sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return fmc_release(fmdev);
}
/* Reads init commands from the FM firmware file and loads them to the chip */
static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
{
	const struct firmware *fw_entry;
	struct bts_header *fw_header;
	struct bts_action *action;
	struct bts_action_delay *delay;
	u8 *fw_data;
	int ret, fw_len, cmd_cnt;

	cmd_cnt = 0;
	set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	ret = request_firmware(&fw_entry, fw_name,
				&fmdev->radio_dev->dev);
	if (ret < 0) {
		fmerr("Unable to read firmware(%s) content\n", fw_name);
		return ret;
	}
	fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);

	fw_data = (void *)fw_entry->data;
	fw_len = fw_entry->size;

	fw_header = (struct bts_header *)fw_data;
	if (fw_header->magic != FM_FW_FILE_HEADER_MAGIC) {
		fmerr("%s not a legal TI firmware file\n", fw_name);
		ret = -EINVAL;
		goto rel_fw;
	}
	fmdbg("FW(%s) magic number : 0x%x\n", fw_name, fw_header->magic);

	/* Skip file header info, we already verified it */
	fw_data += sizeof(struct bts_header);
	fw_len -= sizeof(struct bts_header);

	while (fw_data && fw_len > 0) {
		action = (struct bts_action *)fw_data;

		switch (action->type) {
		case ACTION_SEND_COMMAND:	/* Send */
			if (fmc_send_cmd(fmdev, 0, 0, action->data,
						action->size, NULL, NULL))
				goto rel_fw;

			cmd_cnt++;
			break;

		case ACTION_DELAY:	/* Delay */
			delay = (struct bts_action_delay *)action->data;
			mdelay(delay->msec);
			break;
		}

		fw_data += (sizeof(struct bts_action) + (action->size));
		fw_len -= (sizeof(struct bts_action) + (action->size));
	}
	fmdbg("Firmware commands(%d) loaded to chip\n", cmd_cnt);
rel_fw:
	release_firmware(fw_entry);
	clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	return ret;
}
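
/*
 * Layout of the .bts file walked by the loop above (descriptive sketch
 * derived from the parsing code; struct definitions live in
 * <linux/ti_wilink_st.h>):
 *
 *   bts_header  - magic checked against FM_FW_FILE_HEADER_MAGIC
 *   bts_action  - type, size, then 'size' bytes of data
 *   bts_action  - ... repeated until the end of the file
 *
 * ACTION_SEND_COMMAND records are forwarded as-is through fmc_send_cmd();
 * ACTION_DELAY records carry a bts_action_delay whose msec value is the
 * pause inserted between commands.
 */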
/* Loads default RX configuration to the chip */
static int load_default_rx_configuration(struct fmdev *fmdev)
{
	int ret;

	ret = fm_rx_set_volume(fmdev, FM_DEFAULT_RX_VOLUME);
	if (ret < 0)
		return ret;

	return fm_rx_set_rssi_threshold(fmdev, FM_DEFAULT_RSSI_THRESHOLD);
}
/* Does FM power on sequence */
static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
	u16 payload;
	__be16 asic_id, asic_ver;
	int resp_len, ret;
	u8 fw_name[50];

	if (mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid firmware download option\n");
		return -EINVAL;
	}

	/*
	 * Initialize FM common module. FM GPIO toggling is
	 * taken care in Shared Transport driver.
	 */
	ret = fmc_prepare(fmdev);
	if (ret < 0) {
		fmerr("Unable to prepare FM Common\n");
		return ret;
	}

	payload = FM_ENABLE;
	if (fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			sizeof(payload), NULL, NULL))
		goto rel;

	/* Allow the chip to settle down in Channel-8 mode */
	msleep(20);

	if (fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
			sizeof(asic_id), &asic_id, &resp_len))
		goto rel;
	if (fmc_send_cmd(fmdev, ASIC_VER_GET, REG_RD, NULL,
			sizeof(asic_ver), &asic_ver, &resp_len))
		goto rel;

	fmdbg("ASIC ID: 0x%x , ASIC Version: %d\n",
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	sprintf(fw_name, "%s_%x.%d.bts", FM_FMC_FW_FILE_START,
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	}

	sprintf(fw_name, "%s_%x.%d.bts", (mode == FM_MODE_RX) ?
			FM_RX_FW_FILE_START : FM_TX_FW_FILE_START,
			be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	} else
		return ret;

rel:
	return fmc_release(fmdev);
}
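
/*
 * Example of the firmware names built above (illustrative only): with an
 * ASIC ID of 0x6350 and ASIC version 1, the common image requested would be
 * "<FM_FMC_FW_FILE_START>_6350.1.bts" and the mode-specific image
 * "<FM_RX_FW_FILE_START>_6350.1.bts" (or the TX equivalent), following the
 * "%s_%x.%d.bts" format used by the two sprintf() calls.
 */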
/* Set FM Modes (TX, RX, OFF) */
int fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
{
	int ret = 0;

	if (fm_mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid FM mode\n");
		return -EINVAL;
	}
	if (fmdev->curr_fmmode == fm_mode) {
		fmdbg("Already fm is in mode(%d)\n", fm_mode);
		return ret;
	}

	switch (fm_mode) {
	case FM_MODE_OFF:	/* OFF Mode */
		ret = fm_power_down(fmdev);
		if (ret < 0) {
			fmerr("Failed to set OFF mode\n");
			return ret;
		}
		break;

	case FM_MODE_TX:	/* TX Mode */
	case FM_MODE_RX:	/* RX Mode */
		/* Power down before switching to TX or RX mode */
		if (fmdev->curr_fmmode != FM_MODE_OFF) {
			ret = fm_power_down(fmdev);
			if (ret < 0) {
				fmerr("Failed to set OFF mode\n");
				return ret;
			}
			msleep(30);
		}
		ret = fm_power_up(fmdev, fm_mode);
		if (ret < 0) {
			fmerr("Failed to load firmware\n");
			return ret;
		}
	}
	fmdev->curr_fmmode = fm_mode;

	/* Set default configuration */
	if (fmdev->curr_fmmode == FM_MODE_RX) {
		fmdbg("Loading default rx configuration..\n");
		ret = load_default_rx_configuration(fmdev);
		if (ret < 0)
			fmerr("Failed to load default values\n");
	}

	return ret;
}
/* Returns current FM mode (TX, RX, OFF) */
int fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
{
	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmmode == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	*fmmode = fmdev->curr_fmmode;
	return 0;
}

/* Called by ST layer when FM packet is available */
static long fm_st_receive(void *arg, struct sk_buff *skb)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;

	if (skb == NULL) {
		fmerr("Invalid SKB received from ST\n");
		return -EFAULT;
	}

	if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
		fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
		return -EINVAL;
	}

	memcpy(skb_push(skb, 1), &skb->cb[0], 1);
	skb_queue_tail(&fmdev->rx_q, skb);
	tasklet_schedule(&fmdev->rx_task);

	return 0;
}
/*
 * Called by ST layer to indicate protocol registration completion
 * status.
 */
static void fm_st_reg_comp_cb(void *arg, char data)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;
	fmdev->streg_cbdata = data;
	complete(&wait_for_fmdrv_reg_comp);
}

/*
 * This function will be called from FM V4L2 open function.
 * Register with ST driver and initialize driver data.
 */
int fmc_prepare(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already up\n");
		return 0;
	}

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.recv = fm_st_receive;
	fm_st_proto.match_packet = NULL;
	fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb;
	fm_st_proto.write = NULL; /* TI ST driver will fill write pointer */
	fm_st_proto.priv_data = fmdev;
	fm_st_proto.chnl_id = 0x08;
	fm_st_proto.max_frame_size = 0xff;
	fm_st_proto.hdr_len = 1;
	fm_st_proto.offset_len_in_hdr = 0;
	fm_st_proto.len_size = 1;
	fm_st_proto.reserve = 1;

	ret = st_register(&fm_st_proto);
	if (ret == -EINPROGRESS) {
		init_completion(&wait_for_fmdrv_reg_comp);
		fmdev->streg_cbdata = -EINPROGRESS;
		fmdbg("%s waiting for ST reg completion signal\n", __func__);

		if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
						 FM_ST_REG_TIMEOUT)) {
			fmerr("Timeout(%d sec), didn't get reg "
					"completion signal from ST\n",
					jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
			return -ETIMEDOUT;
		}
		if (fmdev->streg_cbdata != 0) {
			fmerr("ST reg comp CB called with error "
					"status %d\n", fmdev->streg_cbdata);
			return -EAGAIN;
		}
		ret = 0;
	} else if (ret == -1) {
		fmerr("st_register failed %d\n", ret);
		return -EAGAIN;
	}

	if (fm_st_proto.write != NULL) {
		g_st_write = fm_st_proto.write;
	} else {
		fmerr("Failed to get ST write func pointer\n");
		ret = st_unregister(&fm_st_proto);
		if (ret < 0)
			fmerr("st_unregister failed %d\n", ret);
		return -EAGAIN;
	}

	spin_lock_init(&fmdev->rds_buff_lock);
	spin_lock_init(&fmdev->resp_skb_lock);

	/* Initialize TX queue and TX tasklet */
	skb_queue_head_init(&fmdev->tx_q);
	tasklet_init(&fmdev->tx_task, send_tasklet, (unsigned long)fmdev);

	/* Initialize RX Queue and RX tasklet */
	skb_queue_head_init(&fmdev->rx_q);
	tasklet_init(&fmdev->rx_task, recv_tasklet, (unsigned long)fmdev);

	fmdev->irq_info.stage = 0;
	atomic_set(&fmdev->tx_cnt, 1);
	fmdev->resp_comp = NULL;

	init_timer(&fmdev->irq_info.timer);
	fmdev->irq_info.timer.function = &int_timeout_handler;
	fmdev->irq_info.timer.data = (unsigned long)fmdev;
	/* TODO: add FM_STIC_EVENT later */
	fmdev->irq_info.mask = FM_MAL_EVENT;

	/* Region info */
	fmdev->rx.region = region_configs[default_radio_region];

	fmdev->rx.mute_mode = FM_MUTE_OFF;
	fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
	fmdev->rx.rds.flag = FM_RDS_DISABLE;
	fmdev->rx.freq = FM_UNDEFINED_FREQ;
	fmdev->rx.rds_mode = FM_RDS_SYSTEM_RDS;
	fmdev->rx.af_mode = FM_RX_RDS_AF_SWITCH_MODE_OFF;
	fmdev->irq_info.retry = 0;

	fm_rx_reset_rds_cache(fmdev);
	init_waitqueue_head(&fmdev->rx.rds.read_queue);

	fm_rx_reset_station_info(fmdev);
	set_bit(FM_CORE_READY, &fmdev->flag);

	return ret;
}
/*
 * This function will be called from FM V4L2 release function.
 * Unregister from ST driver.
 */
int fmc_release(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already down\n");
		return 0;
	}
	/* Service pending read */
	wake_up_interruptible(&fmdev->rx.rds.read_queue);

	tasklet_kill(&fmdev->tx_task);
	tasklet_kill(&fmdev->rx_task);

	skb_queue_purge(&fmdev->tx_q);
	skb_queue_purge(&fmdev->rx_q);

	fmdev->resp_comp = NULL;
	fmdev->rx.freq = 0;

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.chnl_id = 0x08;

	ret = st_unregister(&fm_st_proto);

	if (ret < 0)
		fmerr("Failed to de-register FM from ST %d\n", ret);
	else
		fmdbg("Successfully unregistered from ST\n");

	clear_bit(FM_CORE_READY, &fmdev->flag);
	return ret;
}
/*
 * Module init function. Ask FM V4L module to register video device.
 * Allocate memory for FM driver context and RX RDS buffer.
 */
static int __init fm_drv_init(void)
{
	struct fmdev *fmdev = NULL;
	int ret = -ENOMEM;

	fmdbg("FM driver version %s\n", FM_DRV_VERSION);

	fmdev = kzalloc(sizeof(struct fmdev), GFP_KERNEL);
	if (NULL == fmdev) {
		fmerr("Can't allocate operation structure memory\n");
		return ret;
	}
	fmdev->rx.rds.buf_size = default_rds_buf * FM_RDS_BLK_SIZE;
	fmdev->rx.rds.buff = kzalloc(fmdev->rx.rds.buf_size, GFP_KERNEL);
	if (NULL == fmdev->rx.rds.buff) {
		fmerr("Can't allocate rds ring buffer\n");
		goto rel_dev;
	}

	ret = fm_v4l2_init_video_device(fmdev, radio_nr);
	if (ret < 0)
		goto rel_rdsbuf;

	fmdev->irq_info.handlers = int_handler_table;
	fmdev->curr_fmmode = FM_MODE_OFF;
	fmdev->tx_data.pwr_lvl = FM_PWR_LVL_DEF;
	fmdev->tx_data.preemph = FM_TX_PREEMPH_50US;
	return ret;

rel_rdsbuf:
	kfree(fmdev->rx.rds.buff);
rel_dev:
	kfree(fmdev);

	return ret;
}

/* Module exit function. Ask FM V4L module to unregister video device */
static void __exit fm_drv_exit(void)
{
	struct fmdev *fmdev = NULL;

	fmdev = fm_v4l2_deinit_video_device();
	if (fmdev != NULL) {
		kfree(fmdev->rx.rds.buff);
		kfree(fmdev);
	}
}

module_init(fm_drv_init);
module_exit(fm_drv_exit);

/* ------------- Module Info ------------- */
MODULE_AUTHOR("Manjunatha Halli <manjunatha_halli@ti.com>");
MODULE_DESCRIPTION("FM Driver for TI's Connectivity chip. " FM_DRV_VERSION);
MODULE_VERSION(FM_DRV_VERSION);
MODULE_LICENSE("GPL");