/*
 * SuperH IrDA Driver
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * Based on sh_sir.c
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * CAUTION
 *
 * This driver is very simple, so it does not yet support:
 *	- MIR/FIR
 *	- DMA transfer
 *	- FIFO mode
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#define DRIVER_NAME "sh_irda"
#define __IRDARAM_LEN	0x1039

#define IRTMR		0x1F00 /* Transfer mode */
#define IRCFR		0x1F02 /* Configuration */
#define IRCTR		0x1F04 /* IR control */
#define IRTFLR		0x1F20 /* Transmit frame length */
#define IRTCTR		0x1F22 /* Transmit control */
#define IRRFLR		0x1F40 /* Receive frame length */
#define IRRCTR		0x1F42 /* Receive control */
#define SIRISR		0x1F60 /* SIR-UART mode interrupt source */
#define SIRIMR		0x1F62 /* SIR-UART mode interrupt mask */
#define SIRICR		0x1F64 /* SIR-UART mode interrupt clear */
#define SIRBCR		0x1F68 /* SIR-UART mode baud rate count */
#define MFIRISR		0x1F70 /* MIR/FIR mode interrupt source */
#define MFIRIMR		0x1F72 /* MIR/FIR mode interrupt mask */
#define MFIRICR		0x1F74 /* MIR/FIR mode interrupt clear */
#define CRCCTR		0x1F80 /* CRC engine control */
#define CRCIR		0x1F86 /* CRC engine input data */
#define CRCCR		0x1F8A /* CRC engine calculation */
#define CRCOR		0x1F8E /* CRC engine output data */
#define FIFOCP		0x1FC0 /* FIFO current pointer */
#define FIFOFP		0x1FC2 /* FIFO follow pointer */
#define FIFORSMSK	0x1FC4 /* FIFO receive status mask */
#define FIFORSOR	0x1FC6 /* FIFO receive status OR */
#define FIFOSEL		0x1FC8 /* FIFO select */
#define FIFORS		0x1FCA /* FIFO receive status */
#define FIFORFL		0x1FCC /* FIFO receive frame length */
#define FIFORAMCP	0x1FCE /* FIFO RAM current pointer */
#define FIFORAMFP	0x1FD0 /* FIFO RAM follow pointer */
#define BIFCTL		0x1FD2 /* BUS interface control */
#define IRDARAM		0x0000 /* IrDA buffer RAM */
#define IRDARAM_LEN	__IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
/* IRTMR */
#define TMD_MASK	(0x3 << 14) /* Transfer Mode */
#define TMD_SIR		(0x0 << 14)
#define TMD_MIR		(0x3 << 14)
#define TMD_FIR		(0x2 << 14)
#define FIFORIM		(1 << 8) /* FIFO receive interrupt mask */
#define MIM		(1 << 4) /* MIR/FIR Interrupt Mask */
#define SIM		(1 << 0) /* SIR Interrupt Mask */
#define xIM_MASK	(FIFORIM | MIM | SIM)

/* IRCFR */
#define RTO_SHIFT	8 /* shift for Receive Timeout */
#define RTO		(0x3 << RTO_SHIFT)

/* IRTCTR */
#define ARMOD		(1 << 15) /* Auto-Receive Mode */
#define TE		(1 <<  0) /* Transmit Enable */

/* IRRFLR */
#define RFL_MASK	(0x1FFF) /* mask for Receive Frame Length */

/* IRRCTR */
#define RE		(1 << 0) /* Receive Enable */

/*
 * SIRISR,  SIRIMR,  SIRICR,
 * MFIRISR, MFIRIMR, MFIRICR
 */
#define FRE		(1 << 15) /* Frame Receive End */
#define TROV		(1 << 11) /* Transfer Area Overflow */
#define xIR_9		(1 << 9)
#define TOT		xIR_9 /* for SIR Timeout */
#define ABTD		xIR_9 /* for MIR/FIR Abort Detection */
#define xIR_8		(1 << 8)
#define FER		xIR_8 /* for SIR Framing Error */
#define CRCER		xIR_8 /* for MIR/FIR CRC error */
#define FTE		(1 << 7) /* Frame Transmit End */
#define xIR_MASK	(FRE | TROV | xIR_9 | xIR_8 | FTE)

/* SIRBCR */
#define BRC_MASK	(0x3F) /* mask for Baud Rate Count */

/* CRCCTR */
#define CRC_RST		(1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK	0x0FFF /* mask for CRC Engine Input Data Count */

/* CRCIR */
#define CRC_IN_MASK	0x0FFF /* mask for CRC Engine Input Data */
/************************************************************************
			enum / structure
************************************************************************/
enum sh_irda_mode {
	SH_IRDA_NONE = 0,
	SH_IRDA_SIR,
	SH_IRDA_MIR,
	SH_IRDA_FIR,
};

struct sh_irda_self;
struct sh_irda_xir_func {
	int (*xir_fre)	(struct sh_irda_self *self);
	int (*xir_trov)	(struct sh_irda_self *self);
	int (*xir_9)	(struct sh_irda_self *self);
	int (*xir_8)	(struct sh_irda_self *self);
	int (*xir_fte)	(struct sh_irda_self *self);
};

struct sh_irda_self {
	void __iomem		*membase;
	unsigned int		irq;
	struct platform_device	*pdev;

	struct net_device	*ndev;

	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;

	enum sh_irda_mode	mode;
	spinlock_t		lock;

	struct sh_irda_xir_func	*xir_func;
};
/************************************************************************
			common function
************************************************************************/
static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	iowrite16(data, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}

static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
{
	unsigned long flags;
	u16 ret;

	spin_lock_irqsave(&self->lock, flags);
	ret = ioread16(self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);

	return ret;
}
static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
				u16 mask, u16 data)
{
	unsigned long flags;
	u16 old, new;

	spin_lock_irqsave(&self->lock, flags);
	old = ioread16(self->membase + offset);
	new = (old & ~mask) | data;
	if (old != new)
		iowrite16(new, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}
/************************************************************************
			mode function
************************************************************************/
/*=====================================
 *
 *		common
 *
 *=====================================*/
static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
{
	struct device *dev = &self->ndev->dev;

	sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
	dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
}

static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
{
	struct device *dev = &self->ndev->dev;

	if (SH_IRDA_SIR != self->mode)
		interval = 0;

	if (interval < 0 || interval > 2) {
		dev_err(dev, "unsupported timeout interval\n");
		return -EINVAL;
	}

	sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
	return 0;
}
static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
{
	struct device *dev = &self->ndev->dev;
	u16 val;

	if (baudrate < 0)
		return 0;

	if (SH_IRDA_SIR != self->mode) {
		dev_err(dev, "it is not SIR mode\n");
		return -EINVAL;
	}

	/*
	 * Baud rate (bits/s) =
	 *	(48 MHz / 26) / ((baud rate counter value + 1) * 16)
	 */
	val = (48000000 / 26 / 16 / baudrate) - 1;
	dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
	sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);

	return 0;
}
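/*
 * Example: for the default 9600 baud,
 *	val = 48000000 / 26 / 16 / 9600 - 1 = 11,
 * so the counter divides the 48 MHz / 26 source clock by 12 * 16,
 * giving roughly 9615 bits/s (about 0.2% fast).
 */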
static int sh_irda_get_rcv_length(struct sh_irda_self *self)
{
	return RFL_MASK & sh_irda_read(self, IRRFLR);
}

/*=====================================
 *
 *		NONE MODE
 *
 *=====================================*/
static int sh_irda_xir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "none mode: frame recv\n");
	return 0;
}

static int sh_irda_xir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "none mode: buffer ram over\n");
	return 0;
}

static int sh_irda_xir_9(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "none mode: time over\n");
	return 0;
}

static int sh_irda_xir_8(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "none mode: framing error\n");
	return 0;
}

static int sh_irda_xir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "none mode: frame transmit end\n");
	return 0;
}

static struct sh_irda_xir_func sh_irda_xir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};
/*=====================================
 *
 *		MIR/FIR MODE
 *
 *		MIR/FIR are not supported now
 *=====================================*/
static struct sh_irda_xir_func sh_irda_mfir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};

/*=====================================
 *
 *		SIR MODE
 *
 *=====================================*/
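/*
 * Frame-receive-end handler.  The received frame sits in the chip's
 * IRDARAM buffer, which is drained with 16-bit reads: each
 * sh_irda_read() at an even byte offset fetches two payload bytes,
 * and data[i % 2] picks the low or high byte for the SIR unwrapper.
 */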
static int sh_irda_sir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	u16 data16;
	u8  *data = (u8 *)&data16;
	int len = sh_irda_get_rcv_length(self);
	int i, j;

	if (len > IRDARAM_LEN)
		len = IRDARAM_LEN;

	dev_dbg(dev, "frame recv length = %d\n", len);

	for (i = 0; i < len; i++) {
		j = i % 2;
		if (!j)
			data16 = sh_irda_read(self, IRDARAM + i);

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, data[j]);
	}
	self->ndev->last_rx = jiffies;

	sh_irda_rcv_ctrl(self, 1);

	return 0;
}

static int sh_irda_sir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "buffer ram over\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_tot(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "time over\n");
	sh_irda_set_baudrate(self, 9600);
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_fer(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "framing error\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_dbg(dev, "frame transmit end\n");
	netif_wake_queue(self->ndev);
	return 0;
}

static struct sh_irda_xir_func sh_irda_sir_func = {
	.xir_fre	= sh_irda_sir_fre,
	.xir_trov	= sh_irda_sir_trov,
	.xir_9		= sh_irda_sir_tot,
	.xir_8		= sh_irda_sir_fer,
	.xir_fte	= sh_irda_sir_fte,
};
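/*
 * Switching modes installs the matching handler table: the IRQ
 * handler dispatches through self->xir_func, so interrupts arriving
 * in an unexpected mode fall back to the complaining NONE-mode stubs.
 */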
static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
{
	struct device *dev = &self->ndev->dev;
	struct sh_irda_xir_func	*func;
	const char *name;
	u16 data;

	switch (mode) {
	case SH_IRDA_SIR:
		name = "SIR";
		data = TMD_SIR;
		func = &sh_irda_sir_func;
		break;
	case SH_IRDA_MIR:
		name = "MIR";
		data = TMD_MIR;
		func = &sh_irda_mfir_func;
		break;
	case SH_IRDA_FIR:
		name = "FIR";
		data = TMD_FIR;
		func = &sh_irda_mfir_func;
		break;
	default:
		name = "NONE";
		data = 0;
		func = &sh_irda_xir_func;
		break;
	}

	self->mode = mode;
	self->xir_func = func;
	sh_irda_update_bits(self, IRTMR, TMD_MASK, data);

	dev_dbg(dev, "switch to %s mode", name);
}

/************************************************************************
			irq function
************************************************************************/
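/*
 * First mask and clear every interrupt source, then open only the
 * "hole" for the current mode: SIM in IRTMR plus the SIR interrupt
 * mask register for SIR, MIM plus MFIRIMR for MIR/FIR.
 */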
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
	u16 tmr_hole;
	u16 xir_reg;

	/* set all mask */
	sh_irda_update_bits(self, IRTMR,   xIM_MASK, xIM_MASK);
	sh_irda_update_bits(self, SIRIMR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

	/* clear irq */
	sh_irda_update_bits(self, SIRICR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

	switch (self->mode) {
	case SH_IRDA_SIR:
		tmr_hole	= SIM;
		xir_reg		= SIRIMR;
		break;
	case SH_IRDA_MIR:
	case SH_IRDA_FIR:
		tmr_hole	= MIM;
		xir_reg		= MFIRIMR;
		break;
	default:
		tmr_hole	= 0;
		xir_reg		= 0;
		break;
	}

	/* open mask */
	if (xir_reg) {
		sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
		sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
	}
}
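/*
 * Only the SIR status register is consulted here; since MIR/FIR are
 * unsupported, SIR events are the only ones ever unmasked, and every
 * pending status bit is acknowledged before the handlers run.
 */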
static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
	struct sh_irda_self *self = dev_id;
	struct sh_irda_xir_func *func = self->xir_func;
	u16 isr = sh_irda_read(self, SIRISR);

	/* clear irq */
	sh_irda_write(self, SIRICR, isr);

	if (isr & FRE)
		func->xir_fre(self);
	if (isr & TROV)
		func->xir_trov(self);
	if (isr & xIR_9)
		func->xir_9(self);
	if (isr & xIR_8)
		func->xir_8(self);
	if (isr & FTE)
		func->xir_fte(self);

	return IRQ_HANDLED;
}

/************************************************************************
			CRC function
************************************************************************/
static void sh_irda_crc_reset(struct sh_irda_self *self)
{
	sh_irda_write(self, CRCCTR, CRC_RST);
}

static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
{
	sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
}

static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
{
	return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
}

static u16 sh_irda_crc_out(struct sh_irda_self *self)
{
	return sh_irda_read(self, CRCOR);
}
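/*
 * Known-answer self-test of the hardware CRC engine (presumably the
 * CRC-CCITT variant used by IrDA): feed the four bytes CC F5 F1 A7,
 * then check that the engine counted 4 inputs and produced 0x51DF.
 * Any mismatch means the block is not working and open must fail.
 */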
static int sh_irda_crc_init(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	int ret = -EIO;
	u16 val;

	sh_irda_crc_reset(self);

	sh_irda_crc_add(self, 0xCC);
	sh_irda_crc_add(self, 0xF5);
	sh_irda_crc_add(self, 0xF1);
	sh_irda_crc_add(self, 0xA7);

	val = sh_irda_crc_cnt(self);
	if (4 != val) {
		dev_err(dev, "CRC count error %x\n", val);
		goto crc_init_out;
	}

	val = sh_irda_crc_out(self);
	if (0x51DF != val) {
		dev_err(dev, "CRC result error %x\n", val);
		goto crc_init_out;
	}

	ret = 0;

crc_init_out:
	sh_irda_crc_reset(self);
	return ret;
}
/************************************************************************
			iobuf function
************************************************************************/
static void sh_irda_remove_iobuf(struct sh_irda_self *self)
{
	kfree(self->rx_buff.head);

	self->tx_buff.head = NULL;
	self->tx_buff.data = NULL;
	self->rx_buff.head = NULL;
	self->rx_buff.data = NULL;
}
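/*
 * Note the asymmetry: the RX buffer is ordinary kmalloc'ed memory that
 * async_unwrap_char() fills, while tx_buff.head points straight at the
 * chip's IRDARAM window, so async_wrap_skb() writes the wrapped frame
 * directly into device RAM.  That is why sh_irda_remove_iobuf() frees
 * only the RX head, and why the txsize argument goes unused.
 */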
static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
{
	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuf already exists\n");
		return -EINVAL;
	}

	/* rx_buff */
	self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
	if (!self->rx_buff.head)
		return -ENOMEM;

	self->rx_buff.truesize	= rxsize;
	self->rx_buff.in_frame	= FALSE;
	self->rx_buff.state	= OUTSIDE_FRAME;
	self->rx_buff.data	= self->rx_buff.head;

	/* tx_buff */
	self->tx_buff.head	= self->membase + IRDARAM;
	self->tx_buff.truesize	= IRDARAM_LEN;

	return 0;
}
/************************************************************************
			net_device_ops function
************************************************************************/
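/*
 * Transmit path: stop the queue and disable the receiver, honour any
 * pending speed change, SIR-wrap the skb directly into IRDARAM, then
 * program the frame length and kick TE.  The FTE interrupt (see
 * sh_irda_sir_fte) wakes the queue again once the frame has gone out.
 */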
static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	struct device *dev = &self->ndev->dev;
	int speed = irda_get_next_speed(skb);
	int ret;

	dev_dbg(dev, "hard xmit\n");

	netif_stop_queue(ndev);
	sh_irda_rcv_ctrl(self, 0);

	ret = sh_irda_set_baudrate(self, speed);
	if (ret < 0)
		goto sh_irda_hard_xmit_end;

	self->tx_buff.len = 0;
	if (skb->len) {
		unsigned long flags;

		spin_lock_irqsave(&self->lock, flags);
		self->tx_buff.len = async_wrap_skb(skb,
						   self->tx_buff.head,
						   self->tx_buff.truesize);
		spin_unlock_irqrestore(&self->lock, flags);

		if (self->tx_buff.len > self->tx_buff.truesize)
			self->tx_buff.len = self->tx_buff.truesize;

		sh_irda_write(self, IRTFLR, self->tx_buff.len);
		sh_irda_write(self, IRTCTR, ARMOD | TE);
	} else
		goto sh_irda_hard_xmit_end;

	dev_kfree_skb(skb);

	return 0;

sh_irda_hard_xmit_end:
	sh_irda_set_baudrate(self, 9600);
	netif_wake_queue(self->ndev);
	sh_irda_rcv_ctrl(self, 1);
	dev_kfree_skb(skb);

	return ret;
}
static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * The irda framework expects this hook to exist,
	 * but there is nothing for it to do yet.
	 */
	return 0;
}

static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	return &self->ndev->stats;
}
static int sh_irda_open(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	int err;

	pm_runtime_get_sync(&self->pdev->dev);
	err = sh_irda_crc_init(self);
	if (err)
		goto open_err;

	sh_irda_set_mode(self, SH_IRDA_SIR);
	sh_irda_set_timeout(self, 2);
	sh_irda_set_baudrate(self, 9600);

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	netif_start_queue(ndev);
	sh_irda_rcv_ctrl(self, 1);
	sh_irda_set_irq_mask(self);

	dev_info(&ndev->dev, "opened\n");

	return 0;

open_err:
	pm_runtime_put_sync(&self->pdev->dev);

	return err;
}

static int sh_irda_stop(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	/* Stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(ndev);
	pm_runtime_put_sync(&self->pdev->dev);

	dev_info(&ndev->dev, "stopped\n");

	return 0;
}
static const struct net_device_ops sh_irda_ndo = {
	.ndo_open		= sh_irda_open,
	.ndo_stop		= sh_irda_stop,
	.ndo_start_xmit		= sh_irda_hard_xmit,
	.ndo_do_ioctl		= sh_irda_ioctl,
	.ndo_get_stats		= sh_irda_stats,
};

/************************************************************************
			platform_driver function
************************************************************************/
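/*
 * Probe order: map the register window, allocate the RX iobuf, set up
 * QoS (SIR at 9600 baud only for now), register the netdev, then
 * request the IRQ.  The err_mem_* labels unwind in reverse order; the
 * IRQ failure path must also drop the already-registered netdev.
 */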
static int sh_irda_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct sh_irda_self *self;
	struct resource *res;
	int irq;
	int err = -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "Not enough platform resources.\n");
		goto exit;
	}

	ndev = alloc_irdadev(sizeof(*self));
	if (!ndev)
		goto exit;

	self = netdev_priv(ndev);
	self->membase = ioremap_nocache(res->start, resource_size(res));
	if (!self->membase) {
		err = -ENXIO;
		dev_err(&pdev->dev, "Unable to ioremap.\n");
		goto err_mem_1;
	}

	err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_2;

	self->pdev = pdev;
	pm_runtime_enable(&pdev->dev);

	irda_init_max_qos_capabilies(&self->qos);

	ndev->netdev_ops	= &sh_irda_ndo;
	ndev->irq		= irq;

	self->ndev			= ndev;
	self->qos.baud_rate.bits	&= IR_9600; /* FIXME */
	self->qos.min_turn_time.bits	= 1; /* 10 ms or more */
	spin_lock_init(&self->lock);

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(ndev);
	if (err)
		goto err_mem_4;

	platform_set_drvdata(pdev, ndev);
	err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self);
	if (err) {
		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
		unregister_netdev(ndev);
		goto err_mem_4;
	}

	dev_info(&pdev->dev, "SuperH IrDA probed\n");

	goto exit;

err_mem_4:
	pm_runtime_disable(&pdev->dev);
	sh_irda_remove_iobuf(self);
err_mem_2:
	iounmap(self->membase);
err_mem_1:
	free_netdev(ndev);
exit:
	return err;
}
static int sh_irda_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_irda_self *self = netdev_priv(ndev);

	if (!self)
		return 0;

	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	sh_irda_remove_iobuf(self);
	iounmap(self->membase);
	free_netdev(ndev);

	return 0;
}
static int sh_irda_runtime_nop(struct device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_irda_pm_ops = {
	.runtime_suspend	= sh_irda_runtime_nop,
	.runtime_resume		= sh_irda_runtime_nop,
};
static struct platform_driver sh_irda_driver = {
	.probe	= sh_irda_probe,
	.remove	= sh_irda_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &sh_irda_pm_ops,
	},
};

module_platform_driver(sh_irda_driver);

MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");