i2c-at91.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190
/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
  19. #include <linux/clk.h>
  20. #include <linux/completion.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/dmaengine.h>
  23. #include <linux/err.h>
  24. #include <linux/i2c.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/io.h>
  27. #include <linux/module.h>
  28. #include <linux/of.h>
  29. #include <linux/of_device.h>
  30. #include <linux/platform_device.h>
  31. #include <linux/slab.h>
  32. #include <linux/platform_data/dma-atmel.h>
  33. #include <linux/pm_runtime.h>
  34. #include <linux/pinctrl/consumer.h>
  35. #define DEFAULT_TWI_CLK_HZ 100000 /* max 400 Kbits/s */
  36. #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
  37. #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
  38. #define AUTOSUSPEND_TIMEOUT 2000
  39. /* AT91 TWI register definitions */
  40. #define AT91_TWI_CR 0x0000 /* Control Register */
  41. #define AT91_TWI_START BIT(0) /* Send a Start Condition */
  42. #define AT91_TWI_STOP BIT(1) /* Send a Stop Condition */
  43. #define AT91_TWI_MSEN BIT(2) /* Master Transfer Enable */
  44. #define AT91_TWI_MSDIS BIT(3) /* Master Transfer Disable */
  45. #define AT91_TWI_SVEN BIT(4) /* Slave Transfer Enable */
  46. #define AT91_TWI_SVDIS BIT(5) /* Slave Transfer Disable */
  47. #define AT91_TWI_QUICK BIT(6) /* SMBus quick command */
  48. #define AT91_TWI_SWRST BIT(7) /* Software Reset */
  49. #define AT91_TWI_ACMEN BIT(16) /* Alternative Command Mode Enable */
  50. #define AT91_TWI_ACMDIS BIT(17) /* Alternative Command Mode Disable */
  51. #define AT91_TWI_THRCLR BIT(24) /* Transmit Holding Register Clear */
  52. #define AT91_TWI_RHRCLR BIT(25) /* Receive Holding Register Clear */
  53. #define AT91_TWI_LOCKCLR BIT(26) /* Lock Clear */
  54. #define AT91_TWI_FIFOEN BIT(28) /* FIFO Enable */
  55. #define AT91_TWI_FIFODIS BIT(29) /* FIFO Disable */
  56. #define AT91_TWI_MMR 0x0004 /* Master Mode Register */
  57. #define AT91_TWI_IADRSZ_1 0x0100 /* Internal Device Address Size */
  58. #define AT91_TWI_MREAD BIT(12) /* Master Read Direction */
  59. #define AT91_TWI_IADR 0x000c /* Internal Address Register */
  60. #define AT91_TWI_CWGR 0x0010 /* Clock Waveform Generator Reg */
  61. #define AT91_TWI_SR 0x0020 /* Status Register */
  62. #define AT91_TWI_TXCOMP BIT(0) /* Transmission Complete */
  63. #define AT91_TWI_RXRDY BIT(1) /* Receive Holding Register Ready */
  64. #define AT91_TWI_TXRDY BIT(2) /* Transmit Holding Register Ready */
  65. #define AT91_TWI_OVRE BIT(6) /* Overrun Error */
  66. #define AT91_TWI_UNRE BIT(7) /* Underrun Error */
  67. #define AT91_TWI_NACK BIT(8) /* Not Acknowledged */
  68. #define AT91_TWI_LOCK BIT(23) /* TWI Lock due to Frame Errors */
  69. #define AT91_TWI_INT_MASK \
  70. (AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)
  71. #define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
  72. #define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
  73. #define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
  74. #define AT91_TWI_RHR 0x0030 /* Receive Holding Register */
  75. #define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
  76. #define AT91_TWI_ACR 0x0040 /* Alternative Command Register */
  77. #define AT91_TWI_ACR_DATAL(len) ((len) & 0xff)
  78. #define AT91_TWI_ACR_DIR BIT(8)
  79. #define AT91_TWI_FMR 0x0050 /* FIFO Mode Register */
  80. #define AT91_TWI_FMR_TXRDYM(mode) (((mode) & 0x3) << 0)
  81. #define AT91_TWI_FMR_TXRDYM_MASK (0x3 << 0)
  82. #define AT91_TWI_FMR_RXRDYM(mode) (((mode) & 0x3) << 4)
  83. #define AT91_TWI_FMR_RXRDYM_MASK (0x3 << 4)
  84. #define AT91_TWI_ONE_DATA 0x0
  85. #define AT91_TWI_TWO_DATA 0x1
  86. #define AT91_TWI_FOUR_DATA 0x2
  87. #define AT91_TWI_FLR 0x0054 /* FIFO Level Register */
  88. #define AT91_TWI_FSR 0x0060 /* FIFO Status Register */
  89. #define AT91_TWI_FIER 0x0064 /* FIFO Interrupt Enable Register */
  90. #define AT91_TWI_FIDR 0x0068 /* FIFO Interrupt Disable Register */
  91. #define AT91_TWI_FIMR 0x006c /* FIFO Interrupt Mask Register */
  92. #define AT91_TWI_VER 0x00fc /* Version Register */
  93. struct at91_twi_pdata {
  94. unsigned clk_max_div;
  95. unsigned clk_offset;
  96. bool has_unre_flag;
  97. bool has_alt_cmd;
  98. struct at_dma_slave dma_slave;
  99. };
  100. struct at91_twi_dma {
  101. struct dma_chan *chan_rx;
  102. struct dma_chan *chan_tx;
  103. struct scatterlist sg[2];
  104. struct dma_async_tx_descriptor *data_desc;
  105. enum dma_data_direction direction;
  106. bool buf_mapped;
  107. bool xfer_in_progress;
  108. };
  109. struct at91_twi_dev {
  110. struct device *dev;
  111. void __iomem *base;
  112. struct completion cmd_complete;
  113. struct clk *clk;
  114. u8 *buf;
  115. size_t buf_len;
  116. struct i2c_msg *msg;
  117. int irq;
  118. unsigned imr;
  119. unsigned transfer_status;
  120. struct i2c_adapter adapter;
  121. unsigned twi_cwgr_reg;
  122. struct at91_twi_pdata *pdata;
  123. bool use_dma;
  124. bool recv_len_abort;
  125. u32 fifo_size;
  126. struct at91_twi_dma dma;
  127. };
  128. static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
  129. {
  130. return readl_relaxed(dev->base + reg);
  131. }
  132. static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
  133. {
  134. writel_relaxed(val, dev->base + reg);
  135. }
  136. static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
  137. {
  138. at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
  139. }
  140. static void at91_twi_irq_save(struct at91_twi_dev *dev)
  141. {
  142. dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
  143. at91_disable_twi_interrupts(dev);
  144. }
  145. static void at91_twi_irq_restore(struct at91_twi_dev *dev)
  146. {
  147. at91_twi_write(dev, AT91_TWI_IER, dev->imr);
  148. }
  149. static void at91_init_twi_bus(struct at91_twi_dev *dev)
  150. {
  151. at91_disable_twi_interrupts(dev);
  152. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
  153. /* FIFO should be enabled immediately after the software reset */
  154. if (dev->fifo_size)
  155. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
  156. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
  157. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
  158. at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
  159. }
  160. /*
  161. * Calculate symmetric clock as stated in datasheet:
  162. * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
  163. */
  164. static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
  165. {
  166. int ckdiv, cdiv, div;
  167. struct at91_twi_pdata *pdata = dev->pdata;
  168. int offset = pdata->clk_offset;
  169. int max_ckdiv = pdata->clk_max_div;
  170. div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
  171. 2 * twi_clk) - offset);
  172. ckdiv = fls(div >> 8);
  173. cdiv = div >> ckdiv;
  174. if (ckdiv > max_ckdiv) {
  175. dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
  176. ckdiv, max_ckdiv);
  177. ckdiv = max_ckdiv;
  178. cdiv = 255;
  179. }
  180. dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv;
  181. dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
  182. }
  183. static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
  184. {
  185. struct at91_twi_dma *dma = &dev->dma;
  186. at91_twi_irq_save(dev);
  187. if (dma->xfer_in_progress) {
  188. if (dma->direction == DMA_FROM_DEVICE)
  189. dmaengine_terminate_all(dma->chan_rx);
  190. else
  191. dmaengine_terminate_all(dma->chan_tx);
  192. dma->xfer_in_progress = false;
  193. }
  194. if (dma->buf_mapped) {
  195. dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
  196. dev->buf_len, dma->direction);
  197. dma->buf_mapped = false;
  198. }
  199. at91_twi_irq_restore(dev);
  200. }
  201. static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
  202. {
  203. if (!dev->buf_len)
  204. return;
  205. /* 8bit write works with and without FIFO */
  206. writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
  207. /* send stop when last byte has been written */
  208. if (--dev->buf_len == 0)
  209. if (!dev->pdata->has_alt_cmd)
  210. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
  211. dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
  212. ++dev->buf;
  213. }
  214. static void at91_twi_write_data_dma_callback(void *data)
  215. {
  216. struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
  217. dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
  218. dev->buf_len, DMA_TO_DEVICE);
  219. /*
  220. * When this callback is called, THR/TX FIFO is likely not to be empty
  221. * yet. So we have to wait for TXCOMP or NACK bits to be set into the
  222. * Status Register to be sure that the STOP bit has been sent and the
  223. * transfer is completed. The NACK interrupt has already been enabled,
  224. * we just have to enable TXCOMP one.
  225. */
  226. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
  227. if (!dev->pdata->has_alt_cmd)
  228. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
  229. }
  230. static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
  231. {
  232. dma_addr_t dma_addr;
  233. struct dma_async_tx_descriptor *txdesc;
  234. struct at91_twi_dma *dma = &dev->dma;
  235. struct dma_chan *chan_tx = dma->chan_tx;
  236. unsigned int sg_len = 1;
  237. if (!dev->buf_len)
  238. return;
  239. dma->direction = DMA_TO_DEVICE;
  240. at91_twi_irq_save(dev);
  241. dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
  242. DMA_TO_DEVICE);
  243. if (dma_mapping_error(dev->dev, dma_addr)) {
  244. dev_err(dev->dev, "dma map failed\n");
  245. return;
  246. }
  247. dma->buf_mapped = true;
  248. at91_twi_irq_restore(dev);
  249. if (dev->fifo_size) {
  250. size_t part1_len, part2_len;
  251. struct scatterlist *sg;
  252. unsigned fifo_mr;
  253. sg_len = 0;
  254. part1_len = dev->buf_len & ~0x3;
  255. if (part1_len) {
  256. sg = &dma->sg[sg_len++];
  257. sg_dma_len(sg) = part1_len;
  258. sg_dma_address(sg) = dma_addr;
  259. }
  260. part2_len = dev->buf_len & 0x3;
  261. if (part2_len) {
  262. sg = &dma->sg[sg_len++];
  263. sg_dma_len(sg) = part2_len;
  264. sg_dma_address(sg) = dma_addr + part1_len;
  265. }
  266. /*
  267. * DMA controller is triggered when at least 4 data can be
  268. * written into the TX FIFO
  269. */
  270. fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
  271. fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
  272. fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
  273. at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
  274. } else {
  275. sg_dma_len(&dma->sg[0]) = dev->buf_len;
  276. sg_dma_address(&dma->sg[0]) = dma_addr;
  277. }
  278. txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
  279. DMA_MEM_TO_DEV,
  280. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  281. if (!txdesc) {
  282. dev_err(dev->dev, "dma prep slave sg failed\n");
  283. goto error;
  284. }
  285. txdesc->callback = at91_twi_write_data_dma_callback;
  286. txdesc->callback_param = dev;
  287. dma->xfer_in_progress = true;
  288. dmaengine_submit(txdesc);
  289. dma_async_issue_pending(chan_tx);
  290. return;
  291. error:
  292. at91_twi_dma_cleanup(dev);
  293. }
  294. static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
  295. {
  296. /*
  297. * If we are in this case, it means there is garbage data in RHR, so
  298. * delete them.
  299. */
  300. if (!dev->buf_len) {
  301. at91_twi_read(dev, AT91_TWI_RHR);
  302. return;
  303. }
  304. /* 8bit read works with and without FIFO */
  305. *dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
  306. --dev->buf_len;
  307. /* return if aborting, we only needed to read RHR to clear RXRDY*/
  308. if (dev->recv_len_abort)
  309. return;
  310. /* handle I2C_SMBUS_BLOCK_DATA */
  311. if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
  312. /* ensure length byte is a valid value */
  313. if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
  314. dev->msg->flags &= ~I2C_M_RECV_LEN;
  315. dev->buf_len += *dev->buf;
  316. dev->msg->len = dev->buf_len + 1;
  317. dev_dbg(dev->dev, "received block length %d\n",
  318. dev->buf_len);
  319. } else {
  320. /* abort and send the stop by reading one more byte */
  321. dev->recv_len_abort = true;
  322. dev->buf_len = 1;
  323. }
  324. }
  325. /* send stop if second but last byte has been read */
  326. if (!dev->pdata->has_alt_cmd && dev->buf_len == 1)
  327. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
  328. dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
  329. ++dev->buf;
  330. }
  331. static void at91_twi_read_data_dma_callback(void *data)
  332. {
  333. struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
  334. unsigned ier = AT91_TWI_TXCOMP;
  335. dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
  336. dev->buf_len, DMA_FROM_DEVICE);
  337. if (!dev->pdata->has_alt_cmd) {
  338. /* The last two bytes have to be read without using dma */
  339. dev->buf += dev->buf_len - 2;
  340. dev->buf_len = 2;
  341. ier |= AT91_TWI_RXRDY;
  342. }
  343. at91_twi_write(dev, AT91_TWI_IER, ier);
  344. }
  345. static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
  346. {
  347. dma_addr_t dma_addr;
  348. struct dma_async_tx_descriptor *rxdesc;
  349. struct at91_twi_dma *dma = &dev->dma;
  350. struct dma_chan *chan_rx = dma->chan_rx;
  351. size_t buf_len;
  352. buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
  353. dma->direction = DMA_FROM_DEVICE;
  354. /* Keep in mind that we won't use dma to read the last two bytes */
  355. at91_twi_irq_save(dev);
  356. dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
  357. if (dma_mapping_error(dev->dev, dma_addr)) {
  358. dev_err(dev->dev, "dma map failed\n");
  359. return;
  360. }
  361. dma->buf_mapped = true;
  362. at91_twi_irq_restore(dev);
  363. if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
  364. unsigned fifo_mr;
  365. /*
  366. * DMA controller is triggered when at least 4 data can be
  367. * read from the RX FIFO
  368. */
  369. fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
  370. fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
  371. fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
  372. at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
  373. }
  374. sg_dma_len(&dma->sg[0]) = buf_len;
  375. sg_dma_address(&dma->sg[0]) = dma_addr;
  376. rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
  377. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  378. if (!rxdesc) {
  379. dev_err(dev->dev, "dma prep slave sg failed\n");
  380. goto error;
  381. }
  382. rxdesc->callback = at91_twi_read_data_dma_callback;
  383. rxdesc->callback_param = dev;
  384. dma->xfer_in_progress = true;
  385. dmaengine_submit(rxdesc);
  386. dma_async_issue_pending(dma->chan_rx);
  387. return;
  388. error:
  389. at91_twi_dma_cleanup(dev);
  390. }
  391. static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
  392. {
  393. struct at91_twi_dev *dev = dev_id;
  394. const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
  395. const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);
  396. if (!irqstatus)
  397. return IRQ_NONE;
  398. /*
  399. * In reception, the behavior of the twi device (before sama5d2) is
  400. * weird. There is some magic about RXRDY flag! When a data has been
  401. * almost received, the reception of a new one is anticipated if there
  402. * is no stop command to send. That is the reason why ask for sending
  403. * the stop command not on the last data but on the second last one.
  404. *
  405. * Unfortunately, we could still have the RXRDY flag set even if the
  406. * transfer is done and we have read the last data. It might happen
  407. * when the i2c slave device sends too quickly data after receiving the
  408. * ack from the master. The data has been almost received before having
  409. * the order to send stop. In this case, sending the stop command could
  410. * cause a RXRDY interrupt with a TXCOMP one. It is better to manage
  411. * the RXRDY interrupt first in order to not keep garbage data in the
  412. * Receive Holding Register for the next transfer.
  413. */
  414. if (irqstatus & AT91_TWI_RXRDY)
  415. at91_twi_read_next_byte(dev);
  416. /*
  417. * When a NACK condition is detected, the I2C controller sets the NACK,
  418. * TXCOMP and TXRDY bits all together in the Status Register (SR).
  419. *
  420. * 1 - Handling NACK errors with CPU write transfer.
  421. *
  422. * In such case, we should not write the next byte into the Transmit
  423. * Holding Register (THR) otherwise the I2C controller would start a new
  424. * transfer and the I2C slave is likely to reply by another NACK.
  425. *
  426. * 2 - Handling NACK errors with DMA write transfer.
  427. *
  428. * By setting the TXRDY bit in the SR, the I2C controller also triggers
  429. * the DMA controller to write the next data into the THR. Then the
  430. * result depends on the hardware version of the I2C controller.
  431. *
  432. * 2a - Without support of the Alternative Command mode.
  433. *
  434. * This is the worst case: the DMA controller is triggered to write the
  435. * next data into the THR, hence starting a new transfer: the I2C slave
  436. * is likely to reply by another NACK.
  437. * Concurrently, this interrupt handler is likely to be called to manage
  438. * the first NACK before the I2C controller detects the second NACK and
  439. * sets once again the NACK bit into the SR.
  440. * When handling the first NACK, this interrupt handler disables the I2C
  441. * controller interruptions, especially the NACK interrupt.
  442. * Hence, the NACK bit is pending into the SR. This is why we should
  443. * read the SR to clear all pending interrupts at the beginning of
  444. * at91_do_twi_transfer() before actually starting a new transfer.
  445. *
  446. * 2b - With support of the Alternative Command mode.
  447. *
  448. * When a NACK condition is detected, the I2C controller also locks the
  449. * THR (and sets the LOCK bit in the SR): even though the DMA controller
  450. * is triggered by the TXRDY bit to write the next data into the THR,
  451. * this data actually won't go on the I2C bus hence a second NACK is not
  452. * generated.
  453. */
  454. if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
  455. at91_disable_twi_interrupts(dev);
  456. complete(&dev->cmd_complete);
  457. } else if (irqstatus & AT91_TWI_TXRDY) {
  458. at91_twi_write_next_byte(dev);
  459. }
  460. /* catch error flags */
  461. dev->transfer_status |= status;
  462. return IRQ_HANDLED;
  463. }
  464. static int at91_do_twi_transfer(struct at91_twi_dev *dev)
  465. {
  466. int ret;
  467. unsigned long time_left;
  468. bool has_unre_flag = dev->pdata->has_unre_flag;
  469. bool has_alt_cmd = dev->pdata->has_alt_cmd;
  470. /*
  471. * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
  472. * read flag but shows the state of the transmission at the time the
  473. * Status Register is read. According to the programmer datasheet,
  474. * TXCOMP is set when both holding register and internal shifter are
  475. * empty and STOP condition has been sent.
  476. * Consequently, we should enable NACK interrupt rather than TXCOMP to
  477. * detect transmission failure.
  478. * Indeed let's take the case of an i2c write command using DMA.
  479. * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
  480. * TXCOMP bits are set together into the Status Register.
  481. * LOCK is a clear on write bit, which is set to prevent the DMA
  482. * controller from sending new data on the i2c bus after a NACK
  483. * condition has happened. Once locked, this i2c peripheral stops
  484. * triggering the DMA controller for new data but it is more than
  485. * likely that a new DMA transaction is already in progress, writing
  486. * into the Transmit Holding Register. Since the peripheral is locked,
  487. * these new data won't be sent to the i2c bus but they will remain
  488. * into the Transmit Holding Register, so TXCOMP bit is cleared.
  489. * Then when the interrupt handler is called, the Status Register is
  490. * read: the TXCOMP bit is clear but NACK bit is still set. The driver
  491. * manage the error properly, without waiting for timeout.
  492. * This case can be reproduced easyly when writing into an at24 eeprom.
  493. *
  494. * Besides, the TXCOMP bit is already set before the i2c transaction
  495. * has been started. For read transactions, this bit is cleared when
  496. * writing the START bit into the Control Register. So the
  497. * corresponding interrupt can safely be enabled just after.
  498. * However for write transactions managed by the CPU, we first write
  499. * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
  500. * interrupt. If TXCOMP interrupt were enabled before writing into THR,
  501. * the interrupt handler would be called immediately and the i2c command
  502. * would be reported as completed.
  503. * Also when a write transaction is managed by the DMA controller,
  504. * enabling the TXCOMP interrupt in this function may lead to a race
  505. * condition since we don't know whether the TXCOMP interrupt is enabled
  506. * before or after the DMA has started to write into THR. So the TXCOMP
  507. * interrupt is enabled later by at91_twi_write_data_dma_callback().
  508. * Immediately after in that DMA callback, if the alternative command
  509. * mode is not used, we still need to send the STOP condition manually
  510. * writing the corresponding bit into the Control Register.
  511. */
  512. dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
  513. (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
  514. reinit_completion(&dev->cmd_complete);
  515. dev->transfer_status = 0;
  516. /* Clear pending interrupts, such as NACK. */
  517. at91_twi_read(dev, AT91_TWI_SR);
  518. if (dev->fifo_size) {
  519. unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
  520. /* Reset FIFO mode register */
  521. fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
  522. AT91_TWI_FMR_RXRDYM_MASK);
  523. fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
  524. fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
  525. at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
  526. /* Flush FIFOs */
  527. at91_twi_write(dev, AT91_TWI_CR,
  528. AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
  529. }
  530. if (!dev->buf_len) {
  531. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
  532. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
  533. } else if (dev->msg->flags & I2C_M_RD) {
  534. unsigned start_flags = AT91_TWI_START;
  535. /* if only one byte is to be read, immediately stop transfer */
  536. if (!has_alt_cmd && dev->buf_len <= 1 &&
  537. !(dev->msg->flags & I2C_M_RECV_LEN))
  538. start_flags |= AT91_TWI_STOP;
  539. at91_twi_write(dev, AT91_TWI_CR, start_flags);
  540. /*
  541. * When using dma without alternative command mode, the last
  542. * byte has to be read manually in order to not send the stop
  543. * command too late and then to receive extra data.
  544. * In practice, there are some issues if you use the dma to
  545. * read n-1 bytes because of latency.
  546. * Reading n-2 bytes with dma and the two last ones manually
  547. * seems to be the best solution.
  548. */
  549. if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
  550. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
  551. at91_twi_read_data_dma(dev);
  552. } else {
  553. at91_twi_write(dev, AT91_TWI_IER,
  554. AT91_TWI_TXCOMP |
  555. AT91_TWI_NACK |
  556. AT91_TWI_RXRDY);
  557. }
  558. } else {
  559. if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
  560. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
  561. at91_twi_write_data_dma(dev);
  562. } else {
  563. at91_twi_write_next_byte(dev);
  564. at91_twi_write(dev, AT91_TWI_IER,
  565. AT91_TWI_TXCOMP |
  566. AT91_TWI_NACK |
  567. AT91_TWI_TXRDY);
  568. }
  569. }
  570. time_left = wait_for_completion_timeout(&dev->cmd_complete,
  571. dev->adapter.timeout);
  572. if (time_left == 0) {
  573. dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
  574. dev_err(dev->dev, "controller timed out\n");
  575. at91_init_twi_bus(dev);
  576. ret = -ETIMEDOUT;
  577. goto error;
  578. }
  579. if (dev->transfer_status & AT91_TWI_NACK) {
  580. dev_dbg(dev->dev, "received nack\n");
  581. ret = -EREMOTEIO;
  582. goto error;
  583. }
  584. if (dev->transfer_status & AT91_TWI_OVRE) {
  585. dev_err(dev->dev, "overrun while reading\n");
  586. ret = -EIO;
  587. goto error;
  588. }
  589. if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
  590. dev_err(dev->dev, "underrun while writing\n");
  591. ret = -EIO;
  592. goto error;
  593. }
  594. if ((has_alt_cmd || dev->fifo_size) &&
  595. (dev->transfer_status & AT91_TWI_LOCK)) {
  596. dev_err(dev->dev, "tx locked\n");
  597. ret = -EIO;
  598. goto error;
  599. }
  600. if (dev->recv_len_abort) {
  601. dev_err(dev->dev, "invalid smbus block length recvd\n");
  602. ret = -EPROTO;
  603. goto error;
  604. }
  605. dev_dbg(dev->dev, "transfer complete\n");
  606. return 0;
  607. error:
  608. /* first stop DMA transfer if still in progress */
  609. at91_twi_dma_cleanup(dev);
  610. /* then flush THR/FIFO and unlock TX if locked */
  611. if ((has_alt_cmd || dev->fifo_size) &&
  612. (dev->transfer_status & AT91_TWI_LOCK)) {
  613. dev_dbg(dev->dev, "unlock tx\n");
  614. at91_twi_write(dev, AT91_TWI_CR,
  615. AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
  616. }
  617. return ret;
  618. }
  619. static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
  620. {
  621. struct at91_twi_dev *dev = i2c_get_adapdata(adap);
  622. int ret;
  623. unsigned int_addr_flag = 0;
  624. struct i2c_msg *m_start = msg;
  625. bool is_read, use_alt_cmd = false;
  626. dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
  627. ret = pm_runtime_get_sync(dev->dev);
  628. if (ret < 0)
  629. goto out;
  630. if (num == 2) {
  631. int internal_address = 0;
  632. int i;
  633. /* 1st msg is put into the internal address, start with 2nd */
  634. m_start = &msg[1];
  635. for (i = 0; i < msg->len; ++i) {
  636. const unsigned addr = msg->buf[msg->len - 1 - i];
  637. internal_address |= addr << (8 * i);
  638. int_addr_flag += AT91_TWI_IADRSZ_1;
  639. }
  640. at91_twi_write(dev, AT91_TWI_IADR, internal_address);
  641. }
  642. is_read = (m_start->flags & I2C_M_RD);
  643. if (dev->pdata->has_alt_cmd) {
  644. if (m_start->len > 0) {
  645. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
  646. at91_twi_write(dev, AT91_TWI_ACR,
  647. AT91_TWI_ACR_DATAL(m_start->len) |
  648. ((is_read) ? AT91_TWI_ACR_DIR : 0));
  649. use_alt_cmd = true;
  650. } else {
  651. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
  652. }
  653. }
  654. at91_twi_write(dev, AT91_TWI_MMR,
  655. (m_start->addr << 16) |
  656. int_addr_flag |
  657. ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
  658. dev->buf_len = m_start->len;
  659. dev->buf = m_start->buf;
  660. dev->msg = m_start;
  661. dev->recv_len_abort = false;
  662. ret = at91_do_twi_transfer(dev);
  663. ret = (ret < 0) ? ret : num;
  664. out:
  665. pm_runtime_mark_last_busy(dev->dev);
  666. pm_runtime_put_autosuspend(dev->dev);
  667. return ret;
  668. }
  669. /*
  670. * The hardware can handle at most two messages concatenated by a
  671. * repeated start via it's internal address feature.
  672. */
  673. static struct i2c_adapter_quirks at91_twi_quirks = {
  674. .flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
  675. .max_comb_1st_msg_len = 3,
  676. };
  677. static u32 at91_twi_func(struct i2c_adapter *adapter)
  678. {
  679. return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
  680. | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
  681. }
  682. static struct i2c_algorithm at91_twi_algorithm = {
  683. .master_xfer = at91_twi_xfer,
  684. .functionality = at91_twi_func,
  685. };
/*
 * Per-SoC TWI parameters.  clk_max_div / clk_offset feed the clock
 * divider calculation (at91_calc_twi_clock); has_unre_flag selects
 * underrun-status handling; has_alt_cmd enables the alternative
 * command mode used on newer controllers.
 */
static struct at91_twi_pdata at91rm9200_config = {
	.clk_max_div = 5,
	.clk_offset = 3,
	.has_unre_flag = true,
	.has_alt_cmd = false,
};
static struct at91_twi_pdata at91sam9261_config = {
	.clk_max_div = 5,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};
static struct at91_twi_pdata at91sam9260_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};
static struct at91_twi_pdata at91sam9g20_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};
static struct at91_twi_pdata at91sam9g10_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};
/*
 * Legacy (non-device-tree) platform-device IDs; driver_data carries a
 * pointer to the matching per-SoC configuration, retrieved in
 * at91_twi_get_driver_data().
 */
static const struct platform_device_id at91_twi_devtypes[] = {
	{
		.name = "i2c-at91rm9200",
		.driver_data = (unsigned long) &at91rm9200_config,
	}, {
		.name = "i2c-at91sam9261",
		.driver_data = (unsigned long) &at91sam9261_config,
	}, {
		.name = "i2c-at91sam9260",
		.driver_data = (unsigned long) &at91sam9260_config,
	}, {
		.name = "i2c-at91sam9g20",
		.driver_data = (unsigned long) &at91sam9g20_config,
	}, {
		.name = "i2c-at91sam9g10",
		.driver_data = (unsigned long) &at91sam9g10_config,
	}, {
		/* sentinel */
	}
};
#if defined(CONFIG_OF)
/* SAM9x5 family: same divider limits as sam9g20, no alt-command mode. */
static struct at91_twi_pdata at91sam9x5_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_alt_cmd = false,
};
/* SAMA5D2: newest controller here — underrun flag and alt-command mode. */
static struct at91_twi_pdata sama5d2_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = true,
	.has_alt_cmd = true,
};
/* Device-tree match table; .data points at the per-SoC configuration. */
static const struct of_device_id atmel_twi_dt_ids[] = {
	{
		.compatible = "atmel,at91rm9200-i2c",
		.data = &at91rm9200_config,
	} , {
		.compatible = "atmel,at91sam9260-i2c",
		.data = &at91sam9260_config,
	} , {
		.compatible = "atmel,at91sam9261-i2c",
		.data = &at91sam9261_config,
	} , {
		.compatible = "atmel,at91sam9g20-i2c",
		.data = &at91sam9g20_config,
	} , {
		.compatible = "atmel,at91sam9g10-i2c",
		.data = &at91sam9g10_config,
	}, {
		.compatible = "atmel,at91sam9x5-i2c",
		.data = &at91sam9x5_config,
	}, {
		.compatible = "atmel,sama5d2-i2c",
		.data = &sama5d2_config,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#endif
/*
 * Request and configure the TX and RX DMA channels.
 *
 * On success, sets dev->use_dma and returns 0.  On failure, releases
 * any channel already obtained and returns a negative errno;
 * -EPROBE_DEFER is propagated without a log message so the probe can
 * be retried once the DMA controller is available.
 *
 * @phy_addr: physical base address of the TWI registers, used to form
 *            the FIFO (THR/RHR) DMA addresses.
 */
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to transfer
	 * the buffer. On the other hand, if the buffer size is not aligned to
	 * addr_width then the buffer is transferred using single byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 data can be written into the TX
	 * FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from buffer length, so the XDMAC is triggered properly to
	 * read data from the RX FIFO.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;
	dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		/* NULL the pointer so the error path won't release it. */
		dma->chan_tx = NULL;
		goto error;
	}
	dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}
	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}
	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}
	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;
	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
	return ret;
error:
	/* Deferred probe is expected; only log real failures. */
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't use DMA, error %d\n", ret);
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}
  848. static struct at91_twi_pdata *at91_twi_get_driver_data(
  849. struct platform_device *pdev)
  850. {
  851. if (pdev->dev.of_node) {
  852. const struct of_device_id *match;
  853. match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
  854. if (!match)
  855. return NULL;
  856. return (struct at91_twi_pdata *)match->data;
  857. }
  858. return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
  859. }
  860. static int at91_twi_probe(struct platform_device *pdev)
  861. {
  862. struct at91_twi_dev *dev;
  863. struct resource *mem;
  864. int rc;
  865. u32 phy_addr;
  866. u32 bus_clk_rate;
  867. dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
  868. if (!dev)
  869. return -ENOMEM;
  870. init_completion(&dev->cmd_complete);
  871. dev->dev = &pdev->dev;
  872. mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  873. if (!mem)
  874. return -ENODEV;
  875. phy_addr = mem->start;
  876. dev->pdata = at91_twi_get_driver_data(pdev);
  877. if (!dev->pdata)
  878. return -ENODEV;
  879. dev->base = devm_ioremap_resource(&pdev->dev, mem);
  880. if (IS_ERR(dev->base))
  881. return PTR_ERR(dev->base);
  882. dev->irq = platform_get_irq(pdev, 0);
  883. if (dev->irq < 0)
  884. return dev->irq;
  885. rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
  886. dev_name(dev->dev), dev);
  887. if (rc) {
  888. dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
  889. return rc;
  890. }
  891. platform_set_drvdata(pdev, dev);
  892. dev->clk = devm_clk_get(dev->dev, NULL);
  893. if (IS_ERR(dev->clk)) {
  894. dev_err(dev->dev, "no clock defined\n");
  895. return -ENODEV;
  896. }
  897. clk_prepare_enable(dev->clk);
  898. if (dev->dev->of_node) {
  899. rc = at91_twi_configure_dma(dev, phy_addr);
  900. if (rc == -EPROBE_DEFER)
  901. return rc;
  902. }
  903. if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
  904. &dev->fifo_size)) {
  905. dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
  906. }
  907. rc = of_property_read_u32(dev->dev->of_node, "clock-frequency",
  908. &bus_clk_rate);
  909. if (rc)
  910. bus_clk_rate = DEFAULT_TWI_CLK_HZ;
  911. at91_calc_twi_clock(dev, bus_clk_rate);
  912. at91_init_twi_bus(dev);
  913. snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
  914. i2c_set_adapdata(&dev->adapter, dev);
  915. dev->adapter.owner = THIS_MODULE;
  916. dev->adapter.class = I2C_CLASS_DEPRECATED;
  917. dev->adapter.algo = &at91_twi_algorithm;
  918. dev->adapter.quirks = &at91_twi_quirks;
  919. dev->adapter.dev.parent = dev->dev;
  920. dev->adapter.nr = pdev->id;
  921. dev->adapter.timeout = AT91_I2C_TIMEOUT;
  922. dev->adapter.dev.of_node = pdev->dev.of_node;
  923. pm_runtime_set_autosuspend_delay(dev->dev, AUTOSUSPEND_TIMEOUT);
  924. pm_runtime_use_autosuspend(dev->dev);
  925. pm_runtime_set_active(dev->dev);
  926. pm_runtime_enable(dev->dev);
  927. rc = i2c_add_numbered_adapter(&dev->adapter);
  928. if (rc) {
  929. dev_err(dev->dev, "Adapter %s registration failed\n",
  930. dev->adapter.name);
  931. clk_disable_unprepare(dev->clk);
  932. pm_runtime_disable(dev->dev);
  933. pm_runtime_set_suspended(dev->dev);
  934. return rc;
  935. }
  936. dev_info(dev->dev, "AT91 i2c bus driver (hw version: %#x).\n",
  937. at91_twi_read(dev, AT91_TWI_VER));
  938. return 0;
  939. }
  940. static int at91_twi_remove(struct platform_device *pdev)
  941. {
  942. struct at91_twi_dev *dev = platform_get_drvdata(pdev);
  943. i2c_del_adapter(&dev->adapter);
  944. clk_disable_unprepare(dev->clk);
  945. pm_runtime_disable(dev->dev);
  946. pm_runtime_set_suspended(dev->dev);
  947. return 0;
  948. }
#ifdef CONFIG_PM
/* Runtime suspend: gate the peripheral clock and park the pins. */
static int at91_twi_runtime_suspend(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
	clk_disable_unprepare(twi_dev->clk);
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}
/* Runtime resume: restore pinmux, then re-enable the clock. */
static int at91_twi_runtime_resume(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
	pinctrl_pm_select_default_state(dev);
	return clk_prepare_enable(twi_dev->clk);
}
/*
 * System suspend (noirq): only power down if runtime PM hasn't
 * already done so, to keep the clock enable count balanced.
 */
static int at91_twi_suspend_noirq(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev))
		at91_twi_runtime_suspend(dev);
	return 0;
}
/*
 * System resume (noirq): mirror of suspend_noirq, then reinitialize
 * the controller since register state may have been lost.
 */
static int at91_twi_resume_noirq(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
	int ret;
	if (!pm_runtime_status_suspended(dev)) {
		ret = at91_twi_runtime_resume(dev);
		if (ret)
			return ret;
	}
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);
	at91_init_twi_bus(twi_dev);
	return 0;
}
static const struct dev_pm_ops at91_twi_pm = {
	.suspend_noirq = at91_twi_suspend_noirq,
	.resume_noirq = at91_twi_resume_noirq,
	.runtime_suspend = at91_twi_runtime_suspend,
	.runtime_resume = at91_twi_runtime_resume,
};
#define at91_twi_pm_ops (&at91_twi_pm)
#else
#define at91_twi_pm_ops NULL
#endif
/* Platform driver glue; id_table serves legacy (non-DT) boards. */
static struct platform_driver at91_twi_driver = {
	.probe = at91_twi_probe,
	.remove = at91_twi_remove,
	.id_table = at91_twi_devtypes,
	.driver = {
		.name = "at91_i2c",
		.of_match_table = of_match_ptr(atmel_twi_dt_ids),
		.pm = at91_twi_pm_ops,
	},
};
static int __init at91_twi_init(void)
{
	return platform_driver_register(&at91_twi_driver);
}
static void __exit at91_twi_exit(void)
{
	platform_driver_unregister(&at91_twi_driver);
}
/*
 * NOTE(review): registered at subsys_initcall level instead of the
 * usual module_init/module_platform_driver — presumably so the bus is
 * up before later initcalls that depend on I2C devices; confirm before
 * converting this to module_platform_driver().
 */
subsys_initcall(at91_twi_init);
module_exit(at91_twi_exit);
MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_i2c");