bfin_sir.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818
  1. /*
  2. * Blackfin Infra-red Driver
  3. *
  4. * Copyright 2006-2009 Analog Devices Inc.
  5. *
  6. * Enter bugs at http://blackfin.uclinux.org/
  7. *
  8. * Licensed under the GPL-2 or later.
  9. *
  10. */
#include "bfin_sir.h"

#ifdef CONFIG_SIR_BFIN_DMA
/* RX DMA runs as a 2D autobuffer: rows of DMA_SIR_RX_XCNT bytes,
 * DMA_SIR_RX_YCNT rows filling one PAGE_SIZE buffer.  The flush timer
 * fires every DMA_SIR_RX_FLUSH_JIFS jiffies (16 ms) to drain a
 * partially-filled row.
 */
#define DMA_SIR_RX_XCNT        10
#define DMA_SIR_RX_YCNT        (PAGE_SIZE / DMA_SIR_RX_XCNT)
#define DMA_SIR_RX_FLUSH_JIFS  (HZ * 4 / 250)
#endif

/* Anomaly 05000447 limits IrDA operation, so cap the rate at 57600. */
#if ANOMALY_05000447
static int max_rate = 57600;
#else
static int max_rate = 115200;
#endif
  22. static void turnaround_delay(unsigned long last_jif, int mtt)
  23. {
  24. long ticks;
  25. mtt = mtt < 10000 ? 10000 : mtt;
  26. ticks = 1 + mtt / (USEC_PER_SEC / HZ);
  27. schedule_timeout_uninterruptible(ticks);
  28. }
  29. static void bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
  30. {
  31. int i;
  32. struct resource *res;
  33. for (i = 0; i < pdev->num_resources; i++) {
  34. res = &pdev->resource[i];
  35. switch (res->flags) {
  36. case IORESOURCE_MEM:
  37. sp->membase = (void __iomem *)res->start;
  38. break;
  39. case IORESOURCE_IRQ:
  40. sp->irq = res->start;
  41. break;
  42. case IORESOURCE_DMA:
  43. sp->rx_dma_channel = res->start;
  44. sp->tx_dma_channel = res->end;
  45. break;
  46. default:
  47. break;
  48. }
  49. }
  50. sp->clk = get_sclk();
  51. #ifdef CONFIG_SIR_BFIN_DMA
  52. sp->tx_done = 1;
  53. init_timer(&(sp->rx_dma_timer));
  54. #endif
  55. }
  56. static void bfin_sir_stop_tx(struct bfin_sir_port *port)
  57. {
  58. #ifdef CONFIG_SIR_BFIN_DMA
  59. disable_dma(port->tx_dma_channel);
  60. #endif
  61. while (!(UART_GET_LSR(port) & THRE)) {
  62. cpu_relax();
  63. continue;
  64. }
  65. UART_CLEAR_IER(port, ETBEI);
  66. }
/* Unmask the UART TX-buffer-empty interrupt to (re)start transmission. */
static void bfin_sir_enable_tx(struct bfin_sir_port *port)
{
	UART_SET_IER(port, ETBEI);
}
/* Mask the UART receive-buffer-full interrupt, stopping reception. */
static void bfin_sir_stop_rx(struct bfin_sir_port *port)
{
	UART_CLEAR_IER(port, ERBFI);
}
/* Unmask the UART receive-buffer-full interrupt, enabling reception. */
static void bfin_sir_enable_rx(struct bfin_sir_port *port)
{
	UART_SET_IER(port, ERBFI);
}
/* Program the UART divisor for one of the supported SIR baud rates.
 * Returns 0 on success or -EINVAL for an unsupported speed.  In either
 * case IrDA mode (UMOD_IRDA | RPOLC) is re-asserted in GCTL on exit.
 */
static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
{
	int ret = -EINVAL;
	unsigned int quot;
	unsigned short val, lsr, lcr;
	static int utime;	/* us per bit at the previous speed; 0 on first call */
	int count = 10;

	lcr = WLS(8);	/* 8 data bits */

	switch (speed) {
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:

		/*
		 * IRDA is not affected by anomaly 05000230, so there is no
		 * need to tweak the divisor like the UART driver (which will
		 * slightly speed up the baud rate on us).
		 */
		quot = (port->clk + (8 * speed)) / (16 * speed);

		/* Drain the transmitter (bounded to ~10 bit times) before
		 * changing the divisor, so in-flight bits finish at the old
		 * rate.
		 */
		do {
			udelay(utime);
			lsr = UART_GET_LSR(port);
		} while (!(lsr & TEMT) && count--);

		/* The microseconds to transmit 1 bit at the new speed */
		utime = 1000000 / speed + 1;

		/* Clear UCEN bit to reset the UART state machine
		 * and control registers
		 */
		val = UART_GET_GCTL(port);
		val &= ~UCEN;
		UART_PUT_GCTL(port, val);

		/* Set DLAB in LCR to access the divisor latches DLL/DLH */
		UART_SET_DLAB(port);
		SSYNC();

		UART_PUT_DLL(port, quot & 0xFF);
		UART_PUT_DLH(port, (quot >> 8) & 0xFF);
		SSYNC();

		/* Clear DLAB in LCR */
		UART_CLEAR_DLAB(port);
		SSYNC();

		UART_PUT_LCR(port, lcr);

		val = UART_GET_GCTL(port);
		val |= UCEN;
		UART_PUT_GCTL(port, val);

		ret = 0;
		break;
	default:
		printk(KERN_WARNING "bfin_sir: Invalid speed %d\n", speed);
		break;
	}

	val = UART_GET_GCTL(port);
	/* If we don't set 'RPOLC', we can't catch the receive interrupt.
	 * It's related to the HW layout and the IR transceiver.
	 */
	val |= UMOD_IRDA | RPOLC;
	UART_PUT_GCTL(port, val);
	return ret;
}
  138. static int bfin_sir_is_receiving(struct net_device *dev)
  139. {
  140. struct bfin_sir_self *self = netdev_priv(dev);
  141. struct bfin_sir_port *port = self->sir_port;
  142. if (!(UART_GET_IER(port) & ERBFI))
  143. return 0;
  144. return self->rx_buff.state != OUTSIDE_FRAME;
  145. }
  146. #ifdef CONFIG_SIR_BFIN_PIO
  147. static void bfin_sir_tx_chars(struct net_device *dev)
  148. {
  149. unsigned int chr;
  150. struct bfin_sir_self *self = netdev_priv(dev);
  151. struct bfin_sir_port *port = self->sir_port;
  152. if (self->tx_buff.len != 0) {
  153. chr = *(self->tx_buff.data);
  154. UART_PUT_CHAR(port, chr);
  155. self->tx_buff.data++;
  156. self->tx_buff.len--;
  157. } else {
  158. self->stats.tx_packets++;
  159. self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
  160. if (self->newspeed) {
  161. bfin_sir_set_speed(port, self->newspeed);
  162. self->speed = self->newspeed;
  163. self->newspeed = 0;
  164. }
  165. bfin_sir_stop_tx(port);
  166. bfin_sir_enable_rx(port);
  167. /* I'm hungry! */
  168. netif_wake_queue(dev);
  169. }
  170. }
  171. static void bfin_sir_rx_chars(struct net_device *dev)
  172. {
  173. struct bfin_sir_self *self = netdev_priv(dev);
  174. struct bfin_sir_port *port = self->sir_port;
  175. unsigned char ch;
  176. UART_CLEAR_LSR(port);
  177. ch = UART_GET_CHAR(port);
  178. async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
  179. dev->last_rx = jiffies;
  180. }
  181. static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
  182. {
  183. struct net_device *dev = dev_id;
  184. struct bfin_sir_self *self = netdev_priv(dev);
  185. struct bfin_sir_port *port = self->sir_port;
  186. spin_lock(&self->lock);
  187. while ((UART_GET_LSR(port) & DR))
  188. bfin_sir_rx_chars(dev);
  189. spin_unlock(&self->lock);
  190. return IRQ_HANDLED;
  191. }
  192. static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
  193. {
  194. struct net_device *dev = dev_id;
  195. struct bfin_sir_self *self = netdev_priv(dev);
  196. struct bfin_sir_port *port = self->sir_port;
  197. spin_lock(&self->lock);
  198. if (UART_GET_LSR(port) & THRE)
  199. bfin_sir_tx_chars(dev);
  200. spin_unlock(&self->lock);
  201. return IRQ_HANDLED;
  202. }
  203. #endif /* CONFIG_SIR_BFIN_PIO */
  204. #ifdef CONFIG_SIR_BFIN_DMA
/* Kick off a TX DMA transfer of the wrapped frame in tx_buff.
 * Guarded by tx_done so a transfer is never restarted mid-flight.
 * A zero-length frame completes inline: apply any pending speed
 * change, re-enable RX and wake the queue without touching the DMA.
 */
static void bfin_sir_dma_tx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (!port->tx_done)
		return;
	port->tx_done = 0;

	if (self->tx_buff.len == 0) {
		self->stats.tx_packets++;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		port->tx_done = 1;
		netif_wake_queue(dev);
		return;
	}

	/* The CPU filled this buffer; flush the data cache so the DMA
	 * engine sees the bytes in memory.
	 */
	blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
		(unsigned long)(self->tx_buff.data+self->tx_buff.len));

	/* Linear 8-bit memory-to-peripheral transfer, IRQ on completion */
	set_dma_config(port->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(port->tx_dma_channel,
		(unsigned long)(self->tx_buff.data));
	set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
	set_dma_x_modify(port->tx_dma_channel, 1);
	enable_dma(port->tx_dma_channel);
}
/* TX DMA completion interrupt: once the channel has stopped running,
 * account the frame, apply any pending speed change and hand the line
 * back to the receiver.
 */
static irqreturn_t bfin_sir_dma_tx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {
		clear_dma_irqstat(port->tx_dma_channel);
		bfin_sir_stop_tx(port);

		self->stats.tx_packets++;
		self->stats.tx_bytes += self->tx_buff.len;
		self->tx_buff.len = 0;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		/* I'm hungry! */
		netif_wake_queue(dev);
		/* Mark the channel free only after all bookkeeping is done */
		port->tx_done = 1;
	}
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}
  261. static void bfin_sir_dma_rx_chars(struct net_device *dev)
  262. {
  263. struct bfin_sir_self *self = netdev_priv(dev);
  264. struct bfin_sir_port *port = self->sir_port;
  265. int i;
  266. UART_CLEAR_LSR(port);
  267. for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
  268. async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
  269. }
/* Flush timer callback: push bytes sitting in a partially-filled DMA
 * row that the per-row interrupt has not reported yet.
 */
void bfin_sir_rx_dma_timeout(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int x_pos, pos;
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	/* Bytes already written into the current (incomplete) row */
	x_pos = DMA_SIR_RX_XCNT - get_dma_curr_xcount(port->rx_dma_channel);
	if (x_pos == DMA_SIR_RX_XCNT)
		x_pos = 0;

	/* Absolute position of the DMA write pointer in the ring */
	pos = port->rx_dma_nrows * DMA_SIR_RX_XCNT + x_pos;

	if (pos > port->rx_dma_buf.tail) {
		port->rx_dma_buf.tail = pos;
		bfin_sir_dma_rx_chars(dev);
		port->rx_dma_buf.head = port->rx_dma_buf.tail;
	}
	spin_unlock_irqrestore(&self->lock, flags);
}
/* RX DMA per-row interrupt: a full row of DMA_SIR_RX_XCNT bytes has
 * landed.  Unwrap it, wrap the row counter at the end of the buffer,
 * and re-arm the flush timer to catch a trailing partial row.
 */
static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	unsigned short irqstat;

	spin_lock(&self->lock);

	port->rx_dma_nrows++;
	port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows;
	bfin_sir_dma_rx_chars(dev);
	if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) {
		port->rx_dma_nrows = 0;
		port->rx_dma_buf.tail = 0;
	}
	port->rx_dma_buf.head = port->rx_dma_buf.tail;

	/* NOTE(review): irqstat is read but its value is never used; only
	 * clear_dma_irqstat() below acknowledges the interrupt — confirm
	 * the read itself is not required by the hardware.
	 */
	irqstat = get_dma_curr_irqstat(port->rx_dma_channel);
	clear_dma_irqstat(port->rx_dma_channel);
	spin_unlock(&self->lock);

	mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS);
	return IRQ_HANDLED;
}
  309. #endif /* CONFIG_SIR_BFIN_DMA */
  310. static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
  311. {
  312. #ifdef CONFIG_SIR_BFIN_DMA
  313. dma_addr_t dma_handle;
  314. #endif /* CONFIG_SIR_BFIN_DMA */
  315. if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
  316. dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
  317. return -EBUSY;
  318. }
  319. if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
  320. dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
  321. free_dma(port->rx_dma_channel);
  322. return -EBUSY;
  323. }
  324. #ifdef CONFIG_SIR_BFIN_DMA
  325. set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
  326. set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);
  327. port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
  328. &dma_handle, GFP_DMA);
  329. port->rx_dma_buf.head = 0;
  330. port->rx_dma_buf.tail = 0;
  331. port->rx_dma_nrows = 0;
  332. set_dma_config(port->rx_dma_channel,
  333. set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
  334. INTR_ON_ROW, DIMENSION_2D,
  335. DATA_SIZE_8, DMA_SYNC_RESTART));
  336. set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
  337. set_dma_x_modify(port->rx_dma_channel, 1);
  338. set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
  339. set_dma_y_modify(port->rx_dma_channel, 1);
  340. set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
  341. enable_dma(port->rx_dma_channel);
  342. port->rx_dma_timer.data = (unsigned long)(dev);
  343. port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;
  344. #else
  345. if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
  346. dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
  347. return -EBUSY;
  348. }
  349. if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
  350. dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
  351. free_irq(port->irq, dev);
  352. return -EBUSY;
  353. }
  354. #endif
  355. return 0;
  356. }
/* Disable the IR function and release everything bfin_sir_startup
 * acquired: DMA channels always, plus either the coherent RX buffer
 * and flush timer (DMA build) or the two UART IRQs (PIO build).
 */
static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev)
{
	unsigned short val;

	bfin_sir_stop_rx(port);

	/* Turn the UART off and drop IrDA mode / RX polarity bits */
	val = UART_GET_GCTL(port);
	val &= ~(UCEN | UMOD_MASK | RPOLC);
	UART_PUT_GCTL(port, val);

#ifdef CONFIG_SIR_BFIN_DMA
	disable_dma(port->tx_dma_channel);
	disable_dma(port->rx_dma_channel);
	del_timer(&(port->rx_dma_timer));
	/* NOTE(review): passes 0 as the dma_handle rather than the handle
	 * returned by dma_alloc_coherent() in bfin_sir_startup — confirm
	 * this is harmless on this platform.
	 */
	dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0);
#else
	free_irq(port->irq+1, dev);
	free_irq(port->irq, dev);
#endif
	free_dma(port->tx_dma_channel);
	free_dma(port->rx_dma_channel);
}
  376. #ifdef CONFIG_PM
  377. static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state)
  378. {
  379. struct bfin_sir_port *sir_port;
  380. struct net_device *dev;
  381. struct bfin_sir_self *self;
  382. sir_port = platform_get_drvdata(pdev);
  383. if (!sir_port)
  384. return 0;
  385. dev = sir_port->dev;
  386. self = netdev_priv(dev);
  387. if (self->open) {
  388. flush_work(&self->work);
  389. bfin_sir_shutdown(self->sir_port, dev);
  390. netif_device_detach(dev);
  391. }
  392. return 0;
  393. }
  394. static int bfin_sir_resume(struct platform_device *pdev)
  395. {
  396. struct bfin_sir_port *sir_port;
  397. struct net_device *dev;
  398. struct bfin_sir_self *self;
  399. struct bfin_sir_port *port;
  400. sir_port = platform_get_drvdata(pdev);
  401. if (!sir_port)
  402. return 0;
  403. dev = sir_port->dev;
  404. self = netdev_priv(dev);
  405. port = self->sir_port;
  406. if (self->open) {
  407. if (self->newspeed) {
  408. self->speed = self->newspeed;
  409. self->newspeed = 0;
  410. }
  411. bfin_sir_startup(port, dev);
  412. bfin_sir_set_speed(port, 9600);
  413. bfin_sir_enable_rx(port);
  414. netif_device_attach(dev);
  415. }
  416. return 0;
  417. }
  418. #else
  419. #define bfin_sir_suspend NULL
  420. #define bfin_sir_resume NULL
  421. #endif
/* Deferred transmit (workqueue context, may sleep).  Waits out an
 * in-progress reception and the turnaround time, resets the IR
 * function, then starts transmission of the wrapped frame.
 */
static void bfin_sir_send_work(struct work_struct *work)
{
	struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work);
	struct net_device *dev = self->sir_port->dev;
	struct bfin_sir_port *port = self->sir_port;
	unsigned short val;
	int tx_cnt = 10;

	/* Bounded wait (10 turnaround delays) for reception to finish */
	while (bfin_sir_is_receiving(dev) && --tx_cnt)
		turnaround_delay(dev->last_rx, self->mtt);

	bfin_sir_stop_rx(port);

	/* To avoid losing the RX interrupt, we reset the IR function
	 * before sending data. We also can set the speed, which will
	 * reset all the UART.
	 */
	val = UART_GET_GCTL(port);
	val &= ~(UMOD_MASK | RPOLC);
	UART_PUT_GCTL(port, val);
	SSYNC();
	val |= UMOD_IRDA | RPOLC;
	UART_PUT_GCTL(port, val);
	SSYNC();
	/* bfin_sir_set_speed(port, self->speed); */

#ifdef CONFIG_SIR_BFIN_DMA
	bfin_sir_dma_tx_chars(dev);
#endif
	bfin_sir_enable_tx(port);
	dev->trans_start = jiffies;
}
  450. static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
  451. {
  452. struct bfin_sir_self *self = netdev_priv(dev);
  453. int speed = irda_get_next_speed(skb);
  454. netif_stop_queue(dev);
  455. self->mtt = irda_get_mtt(skb);
  456. if (speed != self->speed && speed != -1)
  457. self->newspeed = speed;
  458. self->tx_buff.data = self->tx_buff.head;
  459. if (skb->len == 0)
  460. self->tx_buff.len = 0;
  461. else
  462. self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);
  463. schedule_work(&self->work);
  464. dev_kfree_skb(skb);
  465. return 0;
  466. }
/* Private IrDA ioctls: set baud rate, mark media busy, query RX state. */
static int bfin_sir_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int ret = 0;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		/* NOTE(review): unlike SIOCSMEDIABUSY below, a caller
		 * lacking CAP_NET_ADMIN gets 0 (success) here instead of
		 * -EPERM — confirm this asymmetry is intended.
		 */
		if (capable(CAP_NET_ADMIN)) {
			if (self->open) {
				ret = bfin_sir_set_speed(port, rq->ifr_baudrate);
				bfin_sir_enable_rx(port);
			} else {
				dev_warn(&dev->dev, "SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;
	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;
	case SIOCGRECEIVING:
		rq->ifr_receiving = bfin_sir_is_receiving(dev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
  501. static struct net_device_stats *bfin_sir_stats(struct net_device *dev)
  502. {
  503. struct bfin_sir_self *self = netdev_priv(dev);
  504. return &self->stats;
  505. }
/* ndo_open: bring the port up at 9600 baud, open the IrLAP layer and
 * start the TX queue.  On failure the goto ladder undoes, in reverse
 * order, everything set up so far.
 */
static int bfin_sir_open(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int err;

	self->newspeed = 0;
	self->speed = 9600;

	spin_lock_init(&self->lock);

	err = bfin_sir_startup(port, dev);
	if (err)
		goto err_startup;

	bfin_sir_set_speed(port, 9600);

	self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENOMEM;
		goto err_irlap;
	}

	INIT_WORK(&self->work, bfin_sir_send_work);

	/*
	 * Now enable the interrupt then start the queue
	 */
	self->open = 1;
	bfin_sir_enable_rx(port);

	netif_start_queue(dev);

	return 0;

err_irlap:
	self->open = 0;
	bfin_sir_shutdown(port, dev);
err_startup:
	return err;
}
  537. static int bfin_sir_stop(struct net_device *dev)
  538. {
  539. struct bfin_sir_self *self = netdev_priv(dev);
  540. flush_work(&self->work);
  541. bfin_sir_shutdown(self->sir_port, dev);
  542. if (self->rxskb) {
  543. dev_kfree_skb(self->rxskb);
  544. self->rxskb = NULL;
  545. }
  546. /* Stop IrLAP */
  547. if (self->irlap) {
  548. irlap_close(self->irlap);
  549. self->irlap = NULL;
  550. }
  551. netif_stop_queue(dev);
  552. self->open = 0;
  553. return 0;
  554. }
  555. static int bfin_sir_init_iobuf(iobuff_t *io, int size)
  556. {
  557. io->head = kmalloc(size, GFP_KERNEL);
  558. if (!io->head)
  559. return -ENOMEM;
  560. io->truesize = size;
  561. io->in_frame = FALSE;
  562. io->state = OUTSIDE_FRAME;
  563. io->data = io->head;
  564. return 0;
  565. }
/* net_device callbacks for the IrDA interface */
static const struct net_device_ops bfin_sir_ndo = {
	.ndo_open	= bfin_sir_open,
	.ndo_stop	= bfin_sir_stop,
	.ndo_start_xmit	= bfin_sir_hard_xmit,
	.ndo_do_ioctl	= bfin_sir_ioctl,
	.ndo_get_stats	= bfin_sir_stats,
};
/* Probe: claim the UART pins for this port id, allocate the port and
 * netdev structures, build the QoS baud mask from max_rate and
 * register the IrDA network device.
 */
static int bfin_sir_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct bfin_sir_self *self;
	unsigned int baudrate_mask;
	struct bfin_sir_port *sir_port;
	int err;

	/* per[id][3] must match the id — sanity check against the board file */
	if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) && \
				per[pdev->id][3] == pdev->id) {
		err = peripheral_request_list(per[pdev->id], DRIVER_NAME);
		if (err)
			return err;
	} else {
		dev_err(&pdev->dev, "Invalid pdev id, please check board file\n");
		return -ENODEV;
	}

	err = -ENOMEM;
	sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL);
	if (!sir_port)
		goto err_mem_0;

	bfin_sir_init_ports(sir_port, pdev);

	dev = alloc_irdadev(sizeof(*self));
	if (!dev)
		goto err_mem_1;

	self = netdev_priv(dev);
	self->dev = &pdev->dev;
	self->sir_port = sir_port;
	sir_port->dev = dev;

	err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
	if (err)
		goto err_mem_2;
	err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_3;

	dev->netdev_ops = &bfin_sir_ndo;
	dev->irq = sir_port->irq;

	irda_init_max_qos_capabilies(&self->qos);

	/* Accumulate every rate up to and including max_rate; the
	 * cascade of case fallthroughs below is intentional.
	 */
	baudrate_mask = IR_9600;

	switch (max_rate) {
	case 115200:
		baudrate_mask |= IR_115200;
		/* fallthrough */
	case 57600:
		baudrate_mask |= IR_57600;
		/* fallthrough */
	case 38400:
		baudrate_mask |= IR_38400;
		/* fallthrough */
	case 19200:
		baudrate_mask |= IR_19200;
		/* fallthrough */
	case 9600:
		break;
	default:
		dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n");
	}

	self->qos.baud_rate.bits &= baudrate_mask;

	self->qos.min_turn_time.bits = 1; /* 10 ms or more */

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(dev);

	/* The error labels are nested inside the failure branch so the
	 * unwind falls through each cleanup step in reverse order of
	 * setup.
	 */
	if (err) {
		kfree(self->tx_buff.head);
err_mem_3:
		kfree(self->rx_buff.head);
err_mem_2:
		free_netdev(dev);
err_mem_1:
		kfree(sir_port);
err_mem_0:
		peripheral_free_list(per[pdev->id]);
	} else
		platform_set_drvdata(pdev, sir_port);

	return err;
}
  643. static int bfin_sir_remove(struct platform_device *pdev)
  644. {
  645. struct bfin_sir_port *sir_port;
  646. struct net_device *dev = NULL;
  647. struct bfin_sir_self *self;
  648. sir_port = platform_get_drvdata(pdev);
  649. if (!sir_port)
  650. return 0;
  651. dev = sir_port->dev;
  652. self = netdev_priv(dev);
  653. unregister_netdev(dev);
  654. kfree(self->tx_buff.head);
  655. kfree(self->rx_buff.head);
  656. free_netdev(dev);
  657. kfree(sir_port);
  658. return 0;
  659. }
/* Platform driver glue and module metadata */
static struct platform_driver bfin_ir_driver = {
	.probe		= bfin_sir_probe,
	.remove		= bfin_sir_remove,
	.suspend	= bfin_sir_suspend,
	.resume		= bfin_sir_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};

module_platform_driver(bfin_ir_driver);

/* Highest SIR rate to advertise; one of 115200/57600/38400/19200/9600 */
module_param(max_rate, int, 0);
MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");

MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
MODULE_DESCRIPTION("Blackfin IrDA driver");
MODULE_LICENSE("GPL");