ucb1x00-core.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786
  1. /*
  2. * linux/drivers/mfd/ucb1x00-core.c
  3. *
  4. * Copyright (C) 2001 Russell King, All Rights Reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License.
  9. *
  10. * The UCB1x00 core driver provides basic services for handling IO,
  11. * the ADC, interrupts, and accessing registers. It is designed
  12. * such that everything goes through this layer, thereby providing
  13. * a consistent locking methodology, as well as allowing the drivers
  14. * to be used on other non-MCP-enabled hardware platforms.
  15. *
  16. * Note that all locks are private to this file. Nothing else may
  17. * touch them.
  18. */
  19. #include <linux/module.h>
  20. #include <linux/kernel.h>
  21. #include <linux/sched.h>
  22. #include <linux/slab.h>
  23. #include <linux/init.h>
  24. #include <linux/errno.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/irq.h>
  27. #include <linux/device.h>
  28. #include <linux/mutex.h>
  29. #include <linux/mfd/ucb1x00.h>
  30. #include <linux/pm.h>
  31. #include <linux/gpio.h>
  32. static DEFINE_MUTEX(ucb1x00_mutex);
  33. static LIST_HEAD(ucb1x00_drivers);
  34. static LIST_HEAD(ucb1x00_devices);
  35. /**
  36. * ucb1x00_io_set_dir - set IO direction
  37. * @ucb: UCB1x00 structure describing chip
  38. * @in: bitfield of IO pins to be set as inputs
  39. * @out: bitfield of IO pins to be set as outputs
  40. *
  41. * Set the IO direction of the ten general purpose IO pins on
  42. * the UCB1x00 chip. The @in bitfield has priority over the
  43. * @out bitfield, in that if you specify a pin as both input
  44. * and output, it will end up as an input.
  45. *
  46. * ucb1x00_enable must have been called to enable the comms
  47. * before using this function.
  48. *
  49. * This function takes a spinlock, disabling interrupts.
  50. */
  51. void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out)
  52. {
  53. unsigned long flags;
  54. spin_lock_irqsave(&ucb->io_lock, flags);
  55. ucb->io_dir |= out;
  56. ucb->io_dir &= ~in;
  57. ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
  58. spin_unlock_irqrestore(&ucb->io_lock, flags);
  59. }
  60. /**
  61. * ucb1x00_io_write - set or clear IO outputs
  62. * @ucb: UCB1x00 structure describing chip
  63. * @set: bitfield of IO pins to set to logic '1'
  64. * @clear: bitfield of IO pins to set to logic '0'
  65. *
  66. * Set the IO output state of the specified IO pins. The value
  67. * is retained if the pins are subsequently configured as inputs.
  68. * The @clear bitfield has priority over the @set bitfield -
  69. * outputs will be cleared.
  70. *
  71. * ucb1x00_enable must have been called to enable the comms
  72. * before using this function.
  73. *
  74. * This function takes a spinlock, disabling interrupts.
  75. */
  76. void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
  77. {
  78. unsigned long flags;
  79. spin_lock_irqsave(&ucb->io_lock, flags);
  80. ucb->io_out |= set;
  81. ucb->io_out &= ~clear;
  82. ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
  83. spin_unlock_irqrestore(&ucb->io_lock, flags);
  84. }
  85. /**
  86. * ucb1x00_io_read - read the current state of the IO pins
  87. * @ucb: UCB1x00 structure describing chip
  88. *
  89. * Return a bitfield describing the logic state of the ten
  90. * general purpose IO pins.
  91. *
  92. * ucb1x00_enable must have been called to enable the comms
  93. * before using this function.
  94. *
  95. * This function does not take any mutexes or spinlocks.
  96. */
  97. unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
  98. {
  99. return ucb1x00_reg_read(ucb, UCB_IO_DATA);
  100. }
  101. static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
  102. {
  103. struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
  104. unsigned long flags;
  105. spin_lock_irqsave(&ucb->io_lock, flags);
  106. if (value)
  107. ucb->io_out |= 1 << offset;
  108. else
  109. ucb->io_out &= ~(1 << offset);
  110. ucb1x00_enable(ucb);
  111. ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
  112. ucb1x00_disable(ucb);
  113. spin_unlock_irqrestore(&ucb->io_lock, flags);
  114. }
  115. static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
  116. {
  117. struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
  118. unsigned val;
  119. ucb1x00_enable(ucb);
  120. val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
  121. ucb1x00_disable(ucb);
  122. return val & (1 << offset);
  123. }
  124. static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
  125. {
  126. struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
  127. unsigned long flags;
  128. spin_lock_irqsave(&ucb->io_lock, flags);
  129. ucb->io_dir &= ~(1 << offset);
  130. ucb1x00_enable(ucb);
  131. ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
  132. ucb1x00_disable(ucb);
  133. spin_unlock_irqrestore(&ucb->io_lock, flags);
  134. return 0;
  135. }
  136. static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
  137. , int value)
  138. {
  139. struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
  140. unsigned long flags;
  141. unsigned old, mask = 1 << offset;
  142. spin_lock_irqsave(&ucb->io_lock, flags);
  143. old = ucb->io_out;
  144. if (value)
  145. ucb->io_out |= mask;
  146. else
  147. ucb->io_out &= ~mask;
  148. ucb1x00_enable(ucb);
  149. if (old != ucb->io_out)
  150. ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
  151. if (!(ucb->io_dir & mask)) {
  152. ucb->io_dir |= mask;
  153. ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
  154. }
  155. ucb1x00_disable(ucb);
  156. spin_unlock_irqrestore(&ucb->io_lock, flags);
  157. return 0;
  158. }
  159. static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
  160. {
  161. struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
  162. return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
  163. }
  164. /*
  165. * UCB1300 data sheet says we must:
  166. * 1. enable ADC => 5us (including reference startup time)
  167. * 2. select input => 51*tsibclk => 4.3us
  168. * 3. start conversion => 102*tsibclk => 8.5us
  169. * (tsibclk = 1/11981000)
  170. * Period between SIB 128-bit frames = 10.7us
  171. */
/**
 * ucb1x00_adc_enable - enable the ADC converter
 * @ucb: UCB1x00 structure describing chip
 *
 * Enable the ucb1x00 and ADC converter on the UCB1x00 for use.
 * Any code wishing to use the ADC converter must call this
 * function prior to using it.
 *
 * This function takes the ADC mutex to prevent two or more
 * concurrent uses, and therefore may sleep. As a result, it
 * can only be called from process context, not interrupt
 * context.
 *
 * You should release the ADC as soon as possible using
 * ucb1x00_adc_disable.
 */
void ucb1x00_adc_enable(struct ucb1x00 *ucb)
{
	/* Deliberately left locked: ucb1x00_adc_disable() releases it. */
	mutex_lock(&ucb->adc_mutex);

	ucb->adc_cr |= UCB_ADC_ENA;

	/* Start SIBCLK and switch the ADC on; comms stay enabled until
	 * ucb1x00_adc_disable() calls ucb1x00_disable(). */
	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
}
/**
 * ucb1x00_adc_read - read the specified ADC channel
 * @ucb: UCB1x00 structure describing chip
 * @adc_channel: ADC channel mask
 * @sync: wait for syncronisation pulse.
 *
 * Start an ADC conversion and wait for the result.  Note that
 * synchronised ADC conversions (via the ADCSYNC pin) must wait
 * until the trigger is asserted and the conversion is finished.
 *
 * This function currently spins waiting for the conversion to
 * complete (2 frames max without sync).
 *
 * If called for a synchronised ADC conversion, it may sleep
 * with the ADC mutex held.
 */
unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
{
	unsigned int val;

	if (sync)
		adc_channel |= UCB_ADC_SYNC_ENA;

	/* Two writes as the data sheet requires: select the input first,
	 * then issue the start command (see timing comment above). */
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START);

	/* Poll until the data-valid bit appears. */
	for (;;) {
		val = ucb1x00_reg_read(ucb, UCB_ADC_DATA);
		if (val & UCB_ADC_DAT_VAL)
			break;
		/* yield to other processes */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	/* Extract just the conversion result from the data register. */
	return UCB_ADC_DAT(val);
}
/**
 * ucb1x00_adc_disable - disable the ADC converter
 * @ucb: UCB1x00 structure describing chip
 *
 * Disable the ADC converter and release the ADC mutex.
 *
 * Must be paired with a previous ucb1x00_adc_enable(): it drops the
 * comms enable and the adc_mutex that ucb1x00_adc_enable() took.
 */
void ucb1x00_adc_disable(struct ucb1x00 *ucb)
{
	ucb->adc_cr &= ~UCB_ADC_ENA;
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
	/* Balance the ucb1x00_enable() done in ucb1x00_adc_enable(). */
	ucb1x00_disable(ucb);

	mutex_unlock(&ucb->adc_mutex);
}
/*
 * UCB1x00 Interrupt handling.
 *
 * The UCB1x00 can generate interrupts when the SIBCLK is stopped.
 * Since we need to read an internal register, we must re-enable
 * SIBCLK to talk to the chip. We leave the clock running until
 * we have finished processing all interrupts from the chip.
 */
static void ucb1x00_irq(struct irq_desc *desc)
{
	struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
	unsigned int isr, i;

	ucb1x00_enable(ucb);
	isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS);
	/* Ack the chip: write the latched bits, then zero the clear
	 * register so subsequent edges latch again. */
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	/* Dispatch each pending source to its virtual irq; stop early
	 * once no set bits remain. */
	for (i = 0; i < 16 && isr; i++, isr >>= 1)
		if (isr & 1)
			generic_handle_irq(ucb->irq_base + i);
	ucb1x00_disable(ucb);
}
/*
 * Push the cached rising/falling edge-enable state for the sources in
 * @mask out to the chip, filtered by the current irq_mask.
 * Caller must hold ucb->irq_lock.
 */
static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
{
	ucb1x00_enable(ucb);
	/* Only rewrite a register if one of its sources is affected. */
	if (ucb->irq_ris_enbl & mask)
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
	if (ucb->irq_fal_enbl & mask)
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
	ucb1x00_disable(ucb);
}
/* No-op irq_ack: the chip is acked centrally in ucb1x00_irq(). */
static void ucb1x00_irq_noop(struct irq_data *data)
{
}
/* irq_chip .irq_mask: clear this source's bit and reprogram the chip. */
static void ucb1x00_irq_mask(struct irq_data *data)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	ucb->irq_mask &= ~mask;
	ucb1x00_irq_update(ucb, mask);
	raw_spin_unlock(&ucb->irq_lock);
}
/* irq_chip .irq_unmask: set this source's bit and reprogram the chip. */
static void ucb1x00_irq_unmask(struct irq_data *data)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	ucb->irq_mask |= mask;
	ucb1x00_irq_update(ucb, mask);
	raw_spin_unlock(&ucb->irq_lock);
}
/*
 * irq_chip .irq_set_type: record which edge(s) this source triggers on
 * and, if the source is currently unmasked, write both edge-enable
 * registers immediately.
 */
static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	if (type & IRQ_TYPE_EDGE_RISING)
		ucb->irq_ris_enbl |= mask;
	else
		ucb->irq_ris_enbl &= ~mask;

	if (type & IRQ_TYPE_EDGE_FALLING)
		ucb->irq_fal_enbl |= mask;
	else
		ucb->irq_fal_enbl &= ~mask;
	/* Masked sources are flushed to hardware later by irq_unmask. */
	if (ucb->irq_mask & mask) {
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
	}
	raw_spin_unlock(&ucb->irq_lock);

	return 0;
}
/*
 * irq_chip .irq_set_wake: track which sources may wake the system.
 * The accumulated irq_wake mask is applied to the chip at suspend time
 * (see ucb1x00_suspend).  Fails unless the platform declared wakeup
 * capability in its platform data.
 */
static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	if (!pdata || !pdata->can_wakeup)
		return -EINVAL;

	raw_spin_lock(&ucb->irq_lock);
	if (on)
		ucb->irq_wake |= mask;
	else
		ucb->irq_wake &= ~mask;
	raw_spin_unlock(&ucb->irq_lock);

	return 0;
}
/* irq_chip for the 16 virtual interrupts demuxed by ucb1x00_irq(). */
static struct irq_chip ucb1x00_irqchip = {
	.name = "ucb1x00",
	.irq_ack = ucb1x00_irq_noop,
	.irq_mask = ucb1x00_irq_mask,
	.irq_unmask = ucb1x00_irq_unmask,
	.irq_set_type = ucb1x00_irq_set_type,
	.irq_set_wake = ucb1x00_irq_set_wake,
};
  339. static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
  340. {
  341. struct ucb1x00_dev *dev;
  342. int ret;
  343. dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL);
  344. if (!dev)
  345. return -ENOMEM;
  346. dev->ucb = ucb;
  347. dev->drv = drv;
  348. ret = drv->add(dev);
  349. if (ret) {
  350. kfree(dev);
  351. return ret;
  352. }
  353. list_add_tail(&dev->dev_node, &ucb->devs);
  354. list_add_tail(&dev->drv_node, &drv->devs);
  355. return ret;
  356. }
  357. static void ucb1x00_remove_dev(struct ucb1x00_dev *dev)
  358. {
  359. dev->drv->remove(dev);
  360. list_del(&dev->dev_node);
  361. list_del(&dev->drv_node);
  362. kfree(dev);
  363. }
/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies. For reference, the expected
 * IRQ mappings are:
 *
 * Machine Default IRQ
 * adsbitsy IRQ_GPCIN4
 * cerf IRQ_GPIO_UCB1200_IRQ
 * flexanet IRQ_GPIO_GUI
 * freebird IRQ_GPIO_FREEBIRD_UCB1300_IRQ
 * graphicsclient ADS_EXT_IRQ(8)
 * graphicsmaster ADS_EXT_IRQ(8)
 * lart LART_IRQ_UCB1200
 * omnimeter IRQ_GPIO23
 * pfs168 IRQ_GPIO_UCB1300_IRQ
 * simpad IRQ_GPIO_UCB1300_IRQ
 * shannon SHANNON_IRQ_GPIO_IRQ_CODEC
 * yopy IRQ_GPIO_UCB1200_IRQ
 */
static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
{
	unsigned long mask;

	/* Arm autoprobing; an empty mask means no free irq lines. */
	mask = probe_irq_on();
	if (!mask) {
		probe_irq_off(mask);
		return NO_IRQ;
	}
	/*
	 * Enable the ADC interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
	ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
	/*
	 * Cause an ADC interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);
	/*
	 * Wait for the conversion to complete.
	 */
	while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, 0);
	/*
	 * Disable and clear interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_IE_RIS, 0);
	ucb1x00_reg_write(ucb, UCB_IE_FAL, 0);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
	/*
	 * Read triggered interrupt.
	 */
	return probe_irq_off(mask);
}
  420. static void ucb1x00_release(struct device *dev)
  421. {
  422. struct ucb1x00 *ucb = classdev_to_ucb1x00(dev);
  423. kfree(ucb);
  424. }
/* sysfs class under which each detected chip is registered. */
static struct class ucb1x00_class = {
	.name = "ucb1x00",
	.dev_release = ucb1x00_release,
};
  429. static int ucb1x00_probe(struct mcp *mcp)
  430. {
  431. struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
  432. struct ucb1x00_driver *drv;
  433. struct ucb1x00 *ucb;
  434. unsigned id, i, irq_base;
  435. int ret = -ENODEV;
  436. /* Tell the platform to deassert the UCB1x00 reset */
  437. if (pdata && pdata->reset)
  438. pdata->reset(UCB_RST_PROBE);
  439. mcp_enable(mcp);
  440. id = mcp_reg_read(mcp, UCB_ID);
  441. mcp_disable(mcp);
  442. if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
  443. printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
  444. goto out;
  445. }
  446. ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
  447. ret = -ENOMEM;
  448. if (!ucb)
  449. goto out;
  450. device_initialize(&ucb->dev);
  451. ucb->dev.class = &ucb1x00_class;
  452. ucb->dev.parent = &mcp->attached_device;
  453. dev_set_name(&ucb->dev, "ucb1x00");
  454. raw_spin_lock_init(&ucb->irq_lock);
  455. spin_lock_init(&ucb->io_lock);
  456. mutex_init(&ucb->adc_mutex);
  457. ucb->id = id;
  458. ucb->mcp = mcp;
  459. ret = device_add(&ucb->dev);
  460. if (ret)
  461. goto err_dev_add;
  462. ucb1x00_enable(ucb);
  463. ucb->irq = ucb1x00_detect_irq(ucb);
  464. ucb1x00_disable(ucb);
  465. if (ucb->irq == NO_IRQ) {
  466. dev_err(&ucb->dev, "IRQ probe failed\n");
  467. ret = -ENODEV;
  468. goto err_no_irq;
  469. }
  470. ucb->gpio.base = -1;
  471. irq_base = pdata ? pdata->irq_base : 0;
  472. ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
  473. if (ucb->irq_base < 0) {
  474. dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
  475. ucb->irq_base);
  476. ret = ucb->irq_base;
  477. goto err_irq_alloc;
  478. }
  479. for (i = 0; i < 16; i++) {
  480. unsigned irq = ucb->irq_base + i;
  481. irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
  482. irq_set_chip_data(irq, ucb);
  483. irq_clear_status_flags(irq, IRQ_NOREQUEST);
  484. }
  485. irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
  486. irq_set_chained_handler_and_data(ucb->irq, ucb1x00_irq, ucb);
  487. if (pdata && pdata->gpio_base) {
  488. ucb->gpio.label = dev_name(&ucb->dev);
  489. ucb->gpio.dev = &ucb->dev;
  490. ucb->gpio.owner = THIS_MODULE;
  491. ucb->gpio.base = pdata->gpio_base;
  492. ucb->gpio.ngpio = 10;
  493. ucb->gpio.set = ucb1x00_gpio_set;
  494. ucb->gpio.get = ucb1x00_gpio_get;
  495. ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
  496. ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
  497. ucb->gpio.to_irq = ucb1x00_to_irq;
  498. ret = gpiochip_add(&ucb->gpio);
  499. if (ret)
  500. goto err_gpio_add;
  501. } else
  502. dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");
  503. mcp_set_drvdata(mcp, ucb);
  504. if (pdata)
  505. device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);
  506. INIT_LIST_HEAD(&ucb->devs);
  507. mutex_lock(&ucb1x00_mutex);
  508. list_add_tail(&ucb->node, &ucb1x00_devices);
  509. list_for_each_entry(drv, &ucb1x00_drivers, node) {
  510. ucb1x00_add_dev(ucb, drv);
  511. }
  512. mutex_unlock(&ucb1x00_mutex);
  513. return ret;
  514. err_gpio_add:
  515. irq_set_chained_handler(ucb->irq, NULL);
  516. err_irq_alloc:
  517. if (ucb->irq_base > 0)
  518. irq_free_descs(ucb->irq_base, 16);
  519. err_no_irq:
  520. device_del(&ucb->dev);
  521. err_dev_add:
  522. put_device(&ucb->dev);
  523. out:
  524. if (pdata && pdata->reset)
  525. pdata->reset(UCB_RST_PROBE_FAIL);
  526. return ret;
  527. }
/*
 * Tear down a chip instance: detach sub-drivers, remove gpiolib and
 * irq resources, unregister the device, then assert the platform
 * reset.  Order mirrors the probe path in reverse.
 */
static void ucb1x00_remove(struct mcp *mcp)
{
	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
	struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
	struct list_head *l, *n;

	mutex_lock(&ucb1x00_mutex);
	list_del(&ucb->node);
	/* _safe iteration: ucb1x00_remove_dev() deletes each entry. */
	list_for_each_safe(l, n, &ucb->devs) {
		struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node);
		ucb1x00_remove_dev(dev);
	}
	mutex_unlock(&ucb1x00_mutex);

	/* gpio.base stays -1 unless gpiochip_add() succeeded in probe. */
	if (ucb->gpio.base != -1)
		gpiochip_remove(&ucb->gpio);

	irq_set_chained_handler(ucb->irq, NULL);
	irq_free_descs(ucb->irq_base, 16);
	device_unregister(&ucb->dev);

	if (pdata && pdata->reset)
		pdata->reset(UCB_RST_REMOVE);
}
  548. int ucb1x00_register_driver(struct ucb1x00_driver *drv)
  549. {
  550. struct ucb1x00 *ucb;
  551. INIT_LIST_HEAD(&drv->devs);
  552. mutex_lock(&ucb1x00_mutex);
  553. list_add_tail(&drv->node, &ucb1x00_drivers);
  554. list_for_each_entry(ucb, &ucb1x00_devices, node) {
  555. ucb1x00_add_dev(ucb, drv);
  556. }
  557. mutex_unlock(&ucb1x00_mutex);
  558. return 0;
  559. }
  560. void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
  561. {
  562. struct list_head *n, *l;
  563. mutex_lock(&ucb1x00_mutex);
  564. list_del(&drv->node);
  565. list_for_each_safe(l, n, &drv->devs) {
  566. struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node);
  567. ucb1x00_remove_dev(dev);
  568. }
  569. mutex_unlock(&ucb1x00_mutex);
  570. }
  571. #ifdef CONFIG_PM_SLEEP
/*
 * System suspend: notify sub-drivers, then either arm the chip's
 * wakeup sources or let the platform assert reset for the duration
 * of suspend.
 */
static int ucb1x00_suspend(struct device *dev)
{
	struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
	struct ucb1x00 *ucb = dev_get_drvdata(dev);
	struct ucb1x00_dev *udev;

	mutex_lock(&ucb1x00_mutex);
	list_for_each_entry(udev, &ucb->devs, dev_node) {
		if (udev->drv->suspend)
			udev->drv->suspend(udev);
	}
	mutex_unlock(&ucb1x00_mutex);

	if (ucb->irq_wake) {
		unsigned long flags;

		/* Restrict the edge-enable registers to wake sources only;
		 * ucb1x00_resume() restores the full irq_mask view. */
		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
		ucb1x00_enable(ucb);
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_wake);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_wake);
		ucb1x00_disable(ucb);
		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

		enable_irq_wake(ucb->irq);
	} else if (pdata && pdata->reset)
		/* No wake sources: the chip may be held in reset. */
		pdata->reset(UCB_RST_SUSPEND);

	return 0;
}
/*
 * System resume: deassert reset if it was asserted at suspend,
 * restore the IO registers, undo the wake-only interrupt masking,
 * then notify sub-drivers.
 */
static int ucb1x00_resume(struct device *dev)
{
	struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
	struct ucb1x00 *ucb = dev_get_drvdata(dev);
	struct ucb1x00_dev *udev;

	/* Reset was only asserted when there were no wake sources. */
	if (!ucb->irq_wake && pdata && pdata->reset)
		pdata->reset(UCB_RST_RESUME);

	ucb1x00_enable(ucb);
	/* Restore cached IO state (data before direction). */
	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);

	if (ucb->irq_wake) {
		unsigned long flags;

		/* Re-expand the edge-enables from wake-only back to the
		 * full irq_mask set programmed before suspend. */
		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

		disable_irq_wake(ucb->irq);
	}
	ucb1x00_disable(ucb);

	mutex_lock(&ucb1x00_mutex);
	list_for_each_entry(udev, &ucb->devs, dev_node) {
		if (udev->drv->resume)
			udev->drv->resume(udev);
	}
	mutex_unlock(&ucb1x00_mutex);

	return 0;
}
  627. #endif
/* PM ops resolve to no-ops unless CONFIG_PM_SLEEP is set. */
static SIMPLE_DEV_PM_OPS(ucb1x00_pm_ops, ucb1x00_suspend, ucb1x00_resume);

/* MCP bus driver binding for the UCB1x00 codec. */
static struct mcp_driver ucb1x00_driver = {
	.drv = {
		.name = "ucb1x00",
		.owner = THIS_MODULE,
		.pm = &ucb1x00_pm_ops,
	},
	.probe = ucb1x00_probe,
	.remove = ucb1x00_remove,
};
  638. static int __init ucb1x00_init(void)
  639. {
  640. int ret = class_register(&ucb1x00_class);
  641. if (ret == 0) {
  642. ret = mcp_driver_register(&ucb1x00_driver);
  643. if (ret)
  644. class_unregister(&ucb1x00_class);
  645. }
  646. return ret;
  647. }
/* Module exit: unregister in reverse order of ucb1x00_init(). */
static void __exit ucb1x00_exit(void)
{
	mcp_driver_unregister(&ucb1x00_driver);
	class_unregister(&ucb1x00_class);
}
  653. module_init(ucb1x00_init);
  654. module_exit(ucb1x00_exit);
  655. EXPORT_SYMBOL(ucb1x00_io_set_dir);
  656. EXPORT_SYMBOL(ucb1x00_io_write);
  657. EXPORT_SYMBOL(ucb1x00_io_read);
  658. EXPORT_SYMBOL(ucb1x00_adc_enable);
  659. EXPORT_SYMBOL(ucb1x00_adc_read);
  660. EXPORT_SYMBOL(ucb1x00_adc_disable);
  661. EXPORT_SYMBOL(ucb1x00_register_driver);
  662. EXPORT_SYMBOL(ucb1x00_unregister_driver);
  663. MODULE_ALIAS("mcp:ucb1x00");
  664. MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
  665. MODULE_DESCRIPTION("UCB1x00 core driver");
  666. MODULE_LICENSE("GPL");