pxaficp_ir.c

/*
 * linux/drivers/net/irda/pxaficp_ir.c
 *
 * Based on sa1100_ir.c by Russell King
 *
 * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
 *
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>	/* udelay() */
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include <linux/platform_data/irda-pxaficp.h>

#undef __REG
#define __REG(x) ((x) & 0xffff)
#include <mach/regs-uart.h>
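
/*
 * Note on the __REG override above: <mach/regs-uart.h> declares the STUART
 * registers through __REG() as absolute addresses. Redefining __REG(x) as
 * ((x) & 0xffff) before the include reduces each register name to its
 * offset within the peripheral block, so STIER, STLCR and friends can be
 * used directly against the ioremapped si->stuart_base, e.g.:
 *
 *	stuart_writel(si, 0, STIER);
 *
 * This reading of the trick is inferred from the surrounding code: the
 * mask simply keeps the low 16 bits of the address.
 */
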
#define ICCR0		0x0000	/* ICP Control Register 0 */
#define ICCR1		0x0004	/* ICP Control Register 1 */
#define ICCR2		0x0008	/* ICP Control Register 2 */
#define ICDR		0x000c	/* ICP Data Register */
#define ICSR0		0x0014	/* ICP Status Register 0 */
#define ICSR1		0x0018	/* ICP Status Register 1 */

#define ICCR0_AME	(1 << 7)	/* Address match enable */
#define ICCR0_TIE	(1 << 6)	/* Transmit FIFO interrupt enable */
#define ICCR0_RIE	(1 << 5)	/* Receive FIFO interrupt enable */
#define ICCR0_RXE	(1 << 4)	/* Receive enable */
#define ICCR0_TXE	(1 << 3)	/* Transmit enable */
#define ICCR0_TUS	(1 << 2)	/* Transmit FIFO underrun select */
#define ICCR0_LBM	(1 << 1)	/* Loopback mode */
#define ICCR0_ITR	(1 << 0)	/* IrDA transmission */

#define ICCR2_RXP	(1 << 3)	/* Receive Pin Polarity select */
#define ICCR2_TXP	(1 << 2)	/* Transmit Pin Polarity select */
#define ICCR2_TRIG	(3 << 0)	/* Receive FIFO Trigger threshold */
#define ICCR2_TRIG_8	(0 << 0)	/*	>= 8 bytes */
#define ICCR2_TRIG_16	(1 << 0)	/*	>= 16 bytes */
#define ICCR2_TRIG_32	(2 << 0)	/*	>= 32 bytes */

#define ICSR0_EOC	(1 << 6)	/* DMA End of Descriptor Chain */
#define ICSR0_FRE	(1 << 5)	/* Framing error */
#define ICSR0_RFS	(1 << 4)	/* Receive FIFO service request */
#define ICSR0_TFS	(1 << 3)	/* Transmit FIFO service request */
#define ICSR0_RAB	(1 << 2)	/* Receiver abort */
#define ICSR0_TUR	(1 << 1)	/* Transmit FIFO underrun */
#define ICSR0_EIF	(1 << 0)	/* End/Error in FIFO */

#define ICSR1_ROR	(1 << 6)	/* Receive FIFO overrun */
#define ICSR1_CRE	(1 << 5)	/* CRC error */
#define ICSR1_EOF	(1 << 4)	/* End of frame */
#define ICSR1_TNF	(1 << 3)	/* Transmit FIFO not full */
#define ICSR1_RNE	(1 << 2)	/* Receive FIFO not empty */
#define ICSR1_TBY	(1 << 1)	/* Transmitter busy flag */
#define ICSR1_RSY	(1 << 0)	/* Receiver synchronized flag */

#define IrSR_RXPL_NEG_IS_ZERO	(1 << 4)
#define IrSR_RXPL_POS_IS_ZERO	0x0
#define IrSR_TXPL_NEG_IS_ZERO	(1 << 3)
#define IrSR_TXPL_POS_IS_ZERO	0x0
#define IrSR_XMODE_PULSE_1_6	(1 << 2)
#define IrSR_XMODE_PULSE_3_16	0x0
#define IrSR_RCVEIR_IR_MODE	(1 << 1)
#define IrSR_RCVEIR_UART_MODE	0x0
#define IrSR_XMITIR_IR_MODE	(1 << 0)
#define IrSR_XMITIR_UART_MODE	0x0

#define IrSR_IR_RECEIVE_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_IR_MODE   | \
		IrSR_XMITIR_UART_MODE)

#define IrSR_IR_TRANSMIT_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_UART_MODE | \
		IrSR_XMITIR_IR_MODE)
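
/*
 * Note: the two composite modes above differ only in the RCVEIR/XMITIR
 * bits, i.e. in which direction is routed through the IR encoder/decoder.
 * Both are built with the 3/16 pulse width, but every call site in this
 * driver ORs in IrSR_XMODE_PULSE_1_6 when writing STISR, so the
 * transmitter effectively runs with 1.6 us pulses:
 *
 *	stuart_writel(si, IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6, STISR);
 */
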
/* macros for registers read/write */
#define ficp_writel(irda, val, off)					\
	do {								\
		dev_vdbg(irda->dev,					\
			 "%s():%d ficp_writel(0x%x, %s)\n",		\
			 __func__, __LINE__, (val), #off);		\
		writel_relaxed((val), (irda)->irda_base + (off));	\
	} while (0)

#define ficp_readl(irda, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((irda)->irda_base + (off));		\
		dev_vdbg(irda->dev,					\
			 "%s():%d ficp_readl(%s): 0x%x\n",		\
			 __func__, __LINE__, #off, _v);			\
		_v;							\
	})

#define stuart_writel(irda, val, off)					\
	do {								\
		dev_vdbg(irda->dev,					\
			 "%s():%d stuart_writel(0x%x, %s)\n",		\
			 __func__, __LINE__, (val), #off);		\
		writel_relaxed((val), (irda)->stuart_base + (off));	\
	} while (0)

#define stuart_readl(irda, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((irda)->stuart_base + (off));	\
		dev_vdbg(irda->dev,					\
			 "%s():%d stuart_readl(%s): 0x%x\n",		\
			 __func__, __LINE__, #off, _v);			\
		_v;							\
	})

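/*
 * Note: dev_vdbg() compiles to a no-op unless VERBOSE_DEBUG is defined
 * (in which case it behaves like dev_dbg(), subject to dynamic debug),
 * so the tracing in these wrappers costs nothing in normal builds. When
 * enabled, a call such as
 *
 *	ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);
 *
 * would log roughly "pxa_irda_set_speed():NNN ficp_writel(0x11, ICCR0)"
 * before the relaxed MMIO write; the function name and line number shown
 * here are illustrative only.
 */
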
struct pxa_irda {
	int			speed;
	int			newspeed;
	unsigned long long	last_clk;

	void __iomem		*stuart_base;
	void __iomem		*irda_base;
	unsigned char		*dma_rx_buff;
	unsigned char		*dma_tx_buff;
	dma_addr_t		dma_rx_buff_phy;
	dma_addr_t		dma_tx_buff_phy;
	unsigned int		dma_tx_buff_len;
	struct dma_chan		*txdma;
	struct dma_chan		*rxdma;
	dma_cookie_t		rx_cookie;
	dma_cookie_t		tx_cookie;
	int			drcmr_rx;
	int			drcmr_tx;
	int			uart_irq;
	int			icp_irq;

	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;

	struct device		*dev;
	struct net_device	*netdev;	/* back-pointer used by the FIR DMA callbacks */
	struct pxaficp_platform_data *pdata;
	struct clk		*fir_clk;
	struct clk		*sir_clk;
	struct clk		*cur_clk;
};

static int pxa_irda_set_speed(struct pxa_irda *si, int speed);

static inline void pxa_irda_disable_clk(struct pxa_irda *si)
{
	if (si->cur_clk)
		clk_disable_unprepare(si->cur_clk);
	si->cur_clk = NULL;
}

static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
{
	si->cur_clk = si->fir_clk;
	clk_prepare_enable(si->fir_clk);
}

static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
{
	si->cur_clk = si->sir_clk;
	clk_prepare_enable(si->sir_clk);
}

#define IS_FIR(si)		((si)->speed >= 4000000)
#define IRDA_FRAME_SIZE_LIMIT	2047

static void pxa_irda_fir_dma_rx_irq(void *data);
static void pxa_irda_fir_dma_tx_irq(void *data);

static inline void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
{
	struct dma_async_tx_descriptor *tx;

	/* slave direction is a dma_transfer_direction: device to memory */
	tx = dmaengine_prep_slave_single(si->rxdma, si->dma_rx_buff_phy,
					 IRDA_FRAME_SIZE_LIMIT, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(si->dev, "dmaengine_prep_slave_single() failed\n");
		return;
	}
	tx->callback = pxa_irda_fir_dma_rx_irq;
	tx->callback_param = si;
	si->rx_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(si->rxdma);
}

static inline void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(si->txdma, si->dma_tx_buff_phy,
					 si->dma_tx_buff_len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(si->dev, "dmaengine_prep_slave_single() failed\n");
		return;
	}
	tx->callback = pxa_irda_fir_dma_tx_irq;
	tx->callback_param = si;
	si->tx_cookie = dmaengine_submit(tx);
	/* kick the TX channel that the descriptor was submitted to */
	dma_async_issue_pending(si->txdma);
}

/*
 * Set the IrDA communications mode.
 */
static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
{
	if (si->pdata->transceiver_mode)
		si->pdata->transceiver_mode(si->dev, mode);
	else {
		if (gpio_is_valid(si->pdata->gpio_pwdown))
			gpio_set_value(si->pdata->gpio_pwdown,
				       !(mode & IR_OFF) ^
				       !si->pdata->gpio_pwdown_inverted);
		pxa2xx_transceiver_mode(si->dev, mode);
	}
}

/*
 * Set the IrDA communications speed.
 */
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600:	case 19200:	case 38400:
	case 57600:	case 115200:

		/* refer to PXA250/210 Developer's Manual 10-7 */
		/* BaudRate = 14.7456 MHz / (16*Divisor) */
		divisor = 14745600 / (16 * speed);
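
		/*
		 * Worked example (added note): at 115200 baud the divisor
		 * is 14745600 / (16 * 115200) = 8, and at 9600 baud it is
		 * 14745600 / (16 * 9600) = 96. The 16-bit result is split
		 * across the DLL/DLH divisor latch registers below.
		 */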

		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* stop RX DMA */
			dmaengine_terminate_all(si->rxdma);
			/* disable FICP */
			ficp_writel(si, 0, ICCR0);
			pxa_irda_disable_clk(si);

			/* set board transceiver to SIR mode */
			pxa_irda_set_mode(si, IR_SIRMODE);

			/* enable the STUART clock */
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART first */
		stuart_writel(si, 0, STIER);

		/* access DLL & DLH */
		stuart_writel(si, stuart_readl(si, STLCR) | LCR_DLAB, STLCR);
		stuart_writel(si, divisor & 0xff, STDLL);
		stuart_writel(si, divisor >> 8, STDLH);
		stuart_writel(si, stuart_readl(si, STLCR) & ~LCR_DLAB, STLCR);

		si->speed = speed;
		stuart_writel(si, IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6,
			      STISR);
		stuart_writel(si, IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE,
			      STIER);

		local_irq_restore(flags);
		break;

	case 4000000:
		local_irq_save(flags);

		/* disable STUART */
		stuart_writel(si, 0, STIER);
		stuart_writel(si, 0, STISR);
		pxa_irda_disable_clk(si);

		/* disable FICP first */
		ficp_writel(si, 0, ICCR0);

		/* set board transceiver to FIR mode */
		pxa_irda_set_mode(si, IR_FIRMODE);

		/* enable the FICP clock */
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		pxa_irda_fir_dma_rx_start(si);
		ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* SIR interrupt service routine. */
static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int iir, lsr, data;

	iir = stuart_readl(si, STIIR);

	switch (iir & 0x0F) {
	case 0x06: /* Receiver Line Status */
		lsr = stuart_readl(si, STLSR);
		while (lsr & LSR_FIFOE) {
			data = stuart_readl(si, STRBR);
			if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
				printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
				dev->stats.rx_errors++;
				if (lsr & LSR_FE)
					dev->stats.rx_frame_errors++;
				if (lsr & LSR_OE)
					dev->stats.rx_fifo_errors++;
			} else {
				dev->stats.rx_bytes++;
				async_unwrap_char(dev, &dev->stats,
						  &si->rx_buff, data);
			}
			lsr = stuart_readl(si, STLSR);
		}
		si->last_clk = sched_clock();
		break;

	case 0x04: /* Received Data Available */
		/* fall through */

	case 0x0C: /* Character Timeout Indication */
		do {
			dev->stats.rx_bytes++;
			async_unwrap_char(dev, &dev->stats, &si->rx_buff,
					  stuart_readl(si, STRBR));
		} while (stuart_readl(si, STLSR) & LSR_DR);
		si->last_clk = sched_clock();
		break;

	case 0x02: /* Transmit FIFO Data Request */
		while ((si->tx_buff.len) &&
		       (stuart_readl(si, STLSR) & LSR_TDRQ)) {
			stuart_writel(si, *si->tx_buff.data++, STTHR);
			si->tx_buff.len -= 1;
		}

		if (si->tx_buff.len == 0) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;

			/* We need to ensure that the transmitter has finished. */
			while ((stuart_readl(si, STLSR) & LSR_TEMT) == 0)
				cpu_relax();
			si->last_clk = sched_clock();

			/*
			 * Ok, we've finished transmitting.  Now enable
			 * the receiver.  Sometimes we get a receive IRQ
			 * immediately after a transmit...
			 */
			if (si->newspeed) {
				pxa_irda_set_speed(si, si->newspeed);
				si->newspeed = 0;
			} else {
				/* enable IR Receiver, disable IR Transmitter */
				stuart_writel(si, IrSR_IR_RECEIVE_ON |
					      IrSR_XMODE_PULSE_1_6, STISR);
				/* enable STUART and receive interrupts */
				stuart_writel(si, IER_UUE | IER_RLSE |
					      IER_RAVIE | IER_RTIOE, STIER);
			}
			/* I'm hungry! */
			netif_wake_queue(dev);
		}
		break;
	}

	return IRQ_HANDLED;
}

/* FIR Receive DMA interrupt handler */
static void pxa_irda_fir_dma_rx_irq(void *data)
{
	/* callback_param is the driver context; recover the net_device
	 * through the back-pointer set up in pxa_irda_probe() */
	struct pxa_irda *si = data;
	struct net_device *dev = si->netdev;

	dmaengine_terminate_all(si->rxdma);
	netdev_dbg(dev, "pxa_ir: fir rx dma bus error\n");
}

/* FIR Transmit DMA interrupt handler */
static void pxa_irda_fir_dma_tx_irq(void *data)
{
	struct pxa_irda *si = data;	/* callback_param is the driver context */
	struct net_device *dev = si->netdev;

	dmaengine_terminate_all(si->txdma);
	if (dmaengine_tx_status(si->txdma, si->tx_cookie, NULL) == DMA_ERROR) {
		dev->stats.tx_errors++;
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += si->dma_tx_buff_len;
	}

	while (ficp_readl(si, ICSR1) & ICSR1_TBY)
		cpu_relax();
	si->last_clk = sched_clock();

	/*
	 * HACK: It looks like the TBY bit is dropped too soon.
	 * Without this delay things break.
	 */
	udelay(120);

	if (si->newspeed) {
		pxa_irda_set_speed(si, si->newspeed);
		si->newspeed = 0;
	} else {
		int i = 64;

		ficp_writel(si, 0, ICCR0);
		pxa_irda_fir_dma_rx_start(si);
		while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
			ficp_readl(si, ICDR);
		ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

		if (i < 0)
			printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
	}
	netif_wake_queue(dev);
}

/* EIF(Error in FIFO/End in Frame) handler for FIR */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev,
				 int icsr0)
{
	unsigned int len, stat, data;
	struct dma_tx_state state;

	/* Get the current data position. */
	dmaengine_tx_status(si->rxdma, si->rx_cookie, &state);
	len = IRDA_FRAME_SIZE_LIMIT - state.residue;

	do {
		/* Read Status, and then Data. */
		stat = ficp_readl(si, ICSR1);
		rmb();
		data = ficp_readl(si, ICDR);

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else {
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ficp_readl(si, ICSR0) & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}

/* FIR interrupt handler */
static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int icsr0, i = 64;

	/* stop RX DMA */
	dmaengine_terminate_all(si->rxdma);
	si->last_clk = sched_clock();
	icsr0 = ficp_readl(si, ICSR0);

	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
		if (icsr0 & ICSR0_FRE) {
			printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
			dev->stats.rx_frame_errors++;
		} else {
			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
			dev->stats.rx_errors++;
		}
		ficp_writel(si, icsr0 & (ICSR0_FRE | ICSR0_RAB), ICSR0);
	}

	if (icsr0 & ICSR0_EIF) {
		/* An error in FIFO occurred, or there is an end of frame */
		pxa_irda_fir_irq_eif(si, dev, icsr0);
	}

	ficp_writel(si, 0, ICCR0);
	pxa_irda_fir_dma_rx_start(si);
	while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
		ficp_readl(si, ICDR);
	ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

	if (i < 0)
		printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");

	return IRQ_HANDLED;
}

/* hard_xmit interface of irda device */
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
						 si->tx_buff.truesize);

		/* Disable STUART interrupts and switch to transmit mode. */
		stuart_writel(si, 0, STIER);
		stuart_writel(si, IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6,
			      STISR);

		/* enable STUART and transmit interrupts */
		stuart_writel(si, IER_UUE | IER_TIE, STIER);
	} else {
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

		/*
		 * irda_get_mtt() is in microseconds while sched_clock()
		 * counts nanoseconds, so scale mtt up before comparing.
		 */
		if (mtt)
			while (sched_clock() - si->last_clk < mtt * 1000)
				cpu_relax();

		/* stop RX DMA, disable FICP */
		dmaengine_terminate_all(si->rxdma);
		ficp_writel(si, 0, ICCR0);

		pxa_irda_fir_dma_tx_start(si);
		ficp_writel(si, ICCR0_ITR | ICCR0_TXE, ICCR0);
	}

	/* the payload was copied above, so the skb can be freed now */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct pxa_irda *si = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * We are unable to set the speed if the
			 * device is not running.
			 */
			if (netif_running(dev)) {
				ret = pxa_irda_set_speed(si,
						rq->ifr_baudrate);
			} else {
				printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		ret = 0;
		rq->ifr_receiving = IS_FIR(si) ? 0
					: si->rx_buff.state != OUTSIDE_FRAME;
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static void pxa_irda_startup(struct pxa_irda *si)
{
	/* Disable STUART interrupts */
	stuart_writel(si, 0, STIER);
	/* enable STUART interrupt to the processor */
	stuart_writel(si, MCR_OUT2, STMCR);
	/* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
	stuart_writel(si, LCR_WLS0 | LCR_WLS1, STLCR);
	/* enable FIFO, we use FIFO to improve performance */
	stuart_writel(si, FCR_TRFIFOE | FCR_ITL_32, STFCR);

	/* disable FICP */
	ficp_writel(si, 0, ICCR0);
	/* configure FICP ICCR2 */
	ficp_writel(si, ICCR2_TXP | ICCR2_TRIG_32, ICCR2);

	/* force SIR reinitialization: pretend we were in FIR mode so that
	 * pxa_irda_set_speed() takes the full SIR setup path */
	si->speed = 4000000;
	pxa_irda_set_speed(si, 9600);

	printk(KERN_DEBUG "pxa_ir: irda startup\n");
}

static void pxa_irda_shutdown(struct pxa_irda *si)
{
	unsigned long flags;

	local_irq_save(flags);

	/* disable STUART and interrupt */
	stuart_writel(si, 0, STIER);
	/* disable STUART SIR mode */
	stuart_writel(si, 0, STISR);

	/* disable DMA */
	dmaengine_terminate_all(si->rxdma);
	dmaengine_terminate_all(si->txdma);
	/* disable FICP */
	ficp_writel(si, 0, ICCR0);

	/* disable the STUART or FICP clocks */
	pxa_irda_disable_clk(si);

	local_irq_restore(flags);

	/* power off board transceiver */
	pxa_irda_set_mode(si, IR_OFF);

	printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}

static int pxa_irda_start(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	dma_cap_mask_t mask;
	struct dma_slave_config config;
	struct pxad_param param;
	int err;

	si->speed = 9600;

	err = request_irq(si->uart_irq, pxa_irda_sir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq1;

	err = request_irq(si->icp_irq, pxa_irda_fir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq2;

	/*
	 * The interrupt must remain disabled for now.
	 */
	disable_irq(si->uart_irq);
	disable_irq(si->icp_irq);

	err = -EBUSY;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.src_addr = (dma_addr_t)si->irda_base + ICDR;
	config.dst_addr = (dma_addr_t)si->irda_base + ICDR;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
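
	/*
	 * Note: a single dma_slave_config is shared by both channels; ICDR
	 * is the one byte-wide FIFO port, serving as the source address for
	 * RX and the destination address for TX. The burst size of 32
	 * presumably pairs with the ICCR2_TRIG_32 receive-FIFO trigger
	 * programmed in pxa_irda_startup(), so each service request moves a
	 * full trigger level; this pairing is an inference, not something
	 * stated by the original authors.
	 */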

	param.drcmr = si->drcmr_rx;
	si->rxdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
						     &param, &dev->dev, "rx");
	if (!si->rxdma)
		goto err_rx_dma;

	param.drcmr = si->drcmr_tx;
	si->txdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
						     &param, &dev->dev, "tx");
	if (!si->txdma)
		goto err_tx_dma;

	err = dmaengine_slave_config(si->rxdma, &config);
	if (err)
		goto err_dma_rx_buff;
	err = dmaengine_slave_config(si->txdma, &config);
	if (err)
		goto err_dma_rx_buff;

	err = -ENOMEM;
	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_rx_buff_phy, GFP_KERNEL);
	if (!si->dma_rx_buff)
		goto err_dma_rx_buff;

	si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_tx_buff_phy, GFP_KERNEL);
	if (!si->dma_tx_buff)
		goto err_dma_tx_buff;

	/* Setup the serial port for the initial speed. */
	pxa_irda_startup(si);

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	enable_irq(si->uart_irq);
	enable_irq(si->icp_irq);

	netif_start_queue(dev);

	printk(KERN_DEBUG "pxa_ir: irda driver opened\n");

	return 0;

err_irlap:
	pxa_irda_shutdown(si);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
			  si->dma_tx_buff, si->dma_tx_buff_phy);
err_dma_tx_buff:
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
			  si->dma_rx_buff, si->dma_rx_buff_phy);
err_dma_rx_buff:
	dma_release_channel(si->txdma);
err_tx_dma:
	dma_release_channel(si->rxdma);
err_rx_dma:
	free_irq(si->icp_irq, dev);
err_irq2:
	free_irq(si->uart_irq, dev);
err_irq1:
	return err;
}

static int pxa_irda_stop(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);

	netif_stop_queue(dev);

	pxa_irda_shutdown(si);

	/* Stop IrLAP */
	if (si->irlap) {
		irlap_close(si->irlap);
		si->irlap = NULL;
	}

	free_irq(si->uart_irq, dev);
	free_irq(si->icp_irq, dev);

	dmaengine_terminate_all(si->rxdma);
	dmaengine_terminate_all(si->txdma);
	dma_release_channel(si->rxdma);
	dma_release_channel(si->txdma);

	/* free each buffer against its own DMA handle */
	if (si->dma_rx_buff)
		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
				  si->dma_rx_buff, si->dma_rx_buff_phy);
	if (si->dma_tx_buff)
		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
				  si->dma_tx_buff, si->dma_tx_buff_phy);

	printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
	return 0;
}

static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		netif_device_detach(dev);
		pxa_irda_shutdown(si);
	}

	return 0;
}

static int pxa_irda_resume(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		pxa_irda_startup(si);
		netif_device_attach(dev);
		netif_wake_queue(dev);
	}

	return 0;
}

static int pxa_irda_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
	if (io->head != NULL) {
		io->truesize = size;
		io->in_frame = FALSE;
		io->state = OUTSIDE_FRAME;
		io->data = io->head;
	}
	return io->head ? 0 : -ENOMEM;
}

static const struct net_device_ops pxa_irda_netdev_ops = {
	.ndo_open		= pxa_irda_start,
	.ndo_stop		= pxa_irda_stop,
	.ndo_start_xmit		= pxa_irda_hard_xmit,
	.ndo_do_ioctl		= pxa_irda_ioctl,
};

static int pxa_irda_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct pxa_irda *si;
	void __iomem *ficp, *stuart;
	unsigned int baudrate_mask;
	int err;

	if (!pdev->dev.platform_data)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ficp = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ficp)) {
		dev_err(&pdev->dev, "resource ficp not defined\n");
		return PTR_ERR(ficp);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	stuart = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(stuart)) {
		dev_err(&pdev->dev, "resource stuart not defined\n");
		return PTR_ERR(stuart);
	}

	dev = alloc_irdadev(sizeof(struct pxa_irda));
	if (!dev) {
		err = -ENOMEM;
		goto err_mem_1;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	si = netdev_priv(dev);
	si->netdev = dev;	/* back-pointer used by the DMA callbacks */
	si->dev = &pdev->dev;
	si->pdata = pdev->dev.platform_data;
	si->irda_base = ficp;
	si->stuart_base = stuart;
	si->uart_irq = platform_get_irq(pdev, 0);
	si->icp_irq = platform_get_irq(pdev, 1);

	si->sir_clk = devm_clk_get(&pdev->dev, "UARTCLK");
	si->fir_clk = devm_clk_get(&pdev->dev, "FICPCLK");
	if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
		err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
		goto err_mem_4;
	}

	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (res)
		si->drcmr_rx = res->start;
	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (res)
		si->drcmr_tx = res->start;

	/*
	 * Initialise the SIR buffers
	 */
	err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
	if (err)
		goto err_mem_4;
	err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
	if (err)
		goto err_mem_5;

	if (gpio_is_valid(si->pdata->gpio_pwdown)) {
		err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
		if (err)
			goto err_startup;
		err = gpio_direction_output(si->pdata->gpio_pwdown,
					    !si->pdata->gpio_pwdown_inverted);
		if (err) {
			gpio_free(si->pdata->gpio_pwdown);
			goto err_startup;
		}
	}

	if (si->pdata->startup) {
		err = si->pdata->startup(si->dev);
		if (err)
			goto err_startup;
	}

	if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
		dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");

	dev->netdev_ops = &pxa_irda_netdev_ops;

	irda_init_max_qos_capabilies(&si->qos);

	baudrate_mask = 0;
	if (si->pdata->transceiver_cap & IR_SIRMODE)
		baudrate_mask |= IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
	if (si->pdata->transceiver_cap & IR_FIRMODE)
		baudrate_mask |= IR_4000000 << 8;

	si->qos.baud_rate.bits &= baudrate_mask;
	si->qos.min_turn_time.bits = 7;  /* 1ms or more */
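
	/*
	 * Note: min_turn_time.bits is an IrLAP capability bitmap rather
	 * than a time value. Assuming the usual irda-stack encoding
	 * (bit 0 = 10 ms, bit 1 = 5 ms, bit 2 = 1 ms, ...), the value 7
	 * advertises the 10 ms, 5 ms and 1 ms turnaround times, hence the
	 * "1ms or more" comment above; irda_qos_bits_to_value() below
	 * converts the bitmaps into concrete values.
	 */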
	irda_qos_bits_to_value(&si->qos);

	err = register_netdev(dev);

	if (err == 0)
		platform_set_drvdata(pdev, dev);

	if (err) {
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
err_startup:
		kfree(si->tx_buff.head);
err_mem_5:
		kfree(si->rx_buff.head);
err_mem_4:
		free_netdev(dev);
	}
err_mem_1:
	return err;
}

static int pxa_irda_remove(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);

	if (dev) {
		struct pxa_irda *si = netdev_priv(dev);
		unregister_netdev(dev);
		if (gpio_is_valid(si->pdata->gpio_pwdown))
			gpio_free(si->pdata->gpio_pwdown);
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		free_netdev(dev);
	}

	return 0;
}

static struct platform_driver pxa_ir_driver = {
	.driver		= {
		.name	= "pxa2xx-ir",
	},
	.probe		= pxa_irda_probe,
	.remove		= pxa_irda_remove,
	.suspend	= pxa_irda_suspend,
	.resume		= pxa_irda_resume,
};

module_platform_driver(pxa_ir_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");