nuvoton-cir.c

/*
 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
 *
 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
 * Copyright (C) 2009 Nuvoton PS Team
 *
 * Special thanks to Nuvoton for providing hardware, spec sheets and
 * sample code upon which portions of this driver are based. Indirect
 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
 * modeled after.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/rc-core.h>
#include <linux/pci_ids.h>

#include "nuvoton-cir.h"

/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}

/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}

/* update config register bit without changing other bits */
static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) | val;

	nvt_cr_write(nvt, tmp, reg);
}

/* clear config register bit without changing other bits */
static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) & ~val;

	nvt_cr_write(nvt, tmp, reg);
}

/* enter extended function mode */
static inline void nvt_efm_enable(struct nvt_dev *nvt)
{
	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
}

/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	outb(EFER_EFM_DISABLE, nvt->cr_efir);
}

/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
 */
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
	outb(ldev, nvt->cr_efdr);
}

/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
	outb(val, nvt->cir_addr + offset);
}

/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
	u8 val;

	val = inb(nvt->cir_addr + offset);

	return val;
}

/* write val to cir wake register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
					  u8 val, u8 offset)
{
	outb(val, nvt->cir_wake_addr + offset);
}

/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
	u8 val;

	val = inb(nvt->cir_wake_addr + offset);

	return val;
}

/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	pr_info(" * CR CIR ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	pr_info(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	pr_info(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	pr_info(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	pr_info(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	pr_info(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	pr_info(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	pr_info(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	pr_info(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	pr_info(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	pr_info(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	pr_info(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	pr_info(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	pr_info(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	pr_info(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}

/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	pr_info("%s: Dump CIR WAKE logical device registers:\n",
		NVT_DRIVER_NAME);
	pr_info(" * CR CIR WAKE ACTIVE : 0x%x\n",
		nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
		(nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_info(" * CR CIR WAKE IRQ NUM: 0x%x\n",
		nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	pr_info(" * IRCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	pr_info(" * IRSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	pr_info(" * IREN: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	pr_info(" * FIFO CMP DEEP: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	pr_info(" * FIFO CMP TOL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	pr_info(" * FIFO COUNT: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	pr_info(" * SLCH: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	pr_info(" * SLCL: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	pr_info(" * FIFOCON: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	pr_info(" * SRXFSTS: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	pr_info(" * SAMPLE RX FIFO: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	pr_info(" * WR FIFO DATA: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	pr_info(" * RD FIFO ONLY: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	pr_info(" * FIFO IGNORE: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	pr_info(" * IRFSM: 0x%x\n",
		nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	pr_info("* Contents =");
	for (i = 0; i < fifo_len; i++)
		pr_cont(" %02x",
			nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_cont("\n");
}

/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
	unsigned long flags;
	u8 chip_major, chip_minor;
	char chip_id[12];
	bool chip_unknown = false;

	nvt_efm_enable(nvt);

	/* Check if we're wired for the alternate EFER setup */
	chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	if (chip_major == 0xff) {
		nvt->cr_efir = CR_EFIR2;
		nvt->cr_efdr = CR_EFDR2;
		nvt_efm_enable(nvt);
		chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	}

	chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);

	/* these are the known working chip revisions... */
	switch (chip_major) {
	case CHIP_ID_HIGH_667:
		strcpy(chip_id, "w83667hg\0");
		if (chip_minor != CHIP_ID_LOW_667)
			chip_unknown = true;
		break;
	case CHIP_ID_HIGH_677B:
		strcpy(chip_id, "w83677hg\0");
		if (chip_minor != CHIP_ID_LOW_677B2 &&
		    chip_minor != CHIP_ID_LOW_677B3)
			chip_unknown = true;
		break;
	case CHIP_ID_HIGH_677C:
		strcpy(chip_id, "w83677hg-c\0");
		if (chip_minor != CHIP_ID_LOW_677C)
			chip_unknown = true;
		break;
	default:
		strcpy(chip_id, "w836x7hg\0");
		chip_unknown = true;
		break;
	}

	/* warn, but still let the driver load, if we don't know this chip */
	if (chip_unknown)
		nvt_pr(KERN_WARNING, "%s: unknown chip, id: 0x%02x 0x%02x, "
		       "it may not work...", chip_id, chip_major, chip_minor);
	else
		nvt_dbg("%s: chip id: 0x%02x 0x%02x",
			chip_id, chip_major, chip_minor);

	nvt_efm_disable(nvt);

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt->chip_major = chip_major;
	nvt->chip_minor = chip_minor;
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	return 0;
}

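/*
 * Set up the CIR logical device in the Super I/O config space: route the
 * output/multifunction pins for CIR (with the wideband sensor enabled),
 * enable the logical device, and program its I/O base address and IRQ.
 */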
static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val, psreg, psmask, psval;

	if (nvt->chip_major == CHIP_ID_HIGH_667) {
		psreg = CR_MULTIFUNC_PIN_SEL;
		psmask = MULTIFUNC_PIN_SEL_MASK;
		psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
	} else {
		psreg = CR_OUTPUT_PIN_SEL;
		psmask = OUTPUT_PIN_SEL_MASK;
		psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
	}

	/* output pin selection: enable CIR, with WB sensor enabled */
	val = nvt_cr_read(nvt, psreg);
	val &= psmask;
	val |= psval;
	nvt_cr_write(nvt, val, psreg);

	/* Select CIR logical device and enable */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
	nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);

	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}

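/*
 * Set up the CIR Wake logical device: enable the ACPI logical device along
 * with the CIR wake and PME pass-through bits, then enable the CIR Wake
 * logical device and program its I/O base address and IRQ.
 */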
static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device, enable it and CIR Wake */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device and enable */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
	nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);

	nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_wake_addr, nvt->cir_wake_irq);
}

/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}

/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
	nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
			       CIR_WAKE_FIFOCON);
}

/* clear out the hardware's cir tx fifo */
static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
}

/* enable RX Trigger Level Reach and Packet End interrupts */
static void nvt_set_cir_iren(struct nvt_dev *nvt)
{
	u8 iren;

	iren = CIR_IREN_RTR | CIR_IREN_PE;
	nvt_cir_reg_write(nvt, iren, CIR_IREN);
}

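/*
 * Program the CIR logical device's runtime registers: sample limit count,
 * FIFO trigger levels and IRCON mode, then clear the FIFOs and any stale
 * interrupt status before enabling interrupts.
 */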
static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt,
			  CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	/* clear any and all stray interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* and finally, enable interrupts */
	nvt_set_cir_iren(nvt);
}

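/*
 * Program the CIR Wake logical device's runtime registers: wake compare
 * depth and tolerance, sample limit count, RX trigger level and IRCON mode,
 * then clear its FIFO and any stale interrupt status.
 */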
static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	/* set number of bytes needed for wake from s3 (default 65) */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
			       CIR_WAKE_FIFO_CMP_DEEP);

	/* set tolerance/variance allowed per byte during wake compare */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
			       CIR_WAKE_FIFO_CMP_TOL);

	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_WAKE_SLCH);
	nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_WAKE_SLCL);

	/* set cir wake fifo rx trigger level (currently 67) */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFOCON_RX_TRIGGER_LEV,
			       CIR_WAKE_FIFOCON);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear cir wake rx fifo */
	nvt_clear_cir_wake_fifo(nvt);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
}

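/*
 * Re-arm the CIR Wake logical device for receive-only operation with its
 * interrupts masked. Called on remove/suspend/shutdown so an IR remote can
 * still power the machine back on.
 */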
static void nvt_enable_wake(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
}

#if 0 /* Currently unused */
/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		nvt_pr(KERN_NOTICE, "Unable to determine carrier! (c:%u, d:%u)",
		       count, duration);
		return 0;
	}

	carrier = MS_TO_NS(count) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
#endif

/*
 * set carrier frequency
 *
 * set carrier on 2 registers: CP & CC
 * always set CP as 0x81
 * set CC by SPEC, CC = 3MHz/carrier - 1
 */
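/*
 * For example, using the formula above, a 38 kHz carrier gives
 * CC = 3000000 / 38000 - 1 = 77 (0x4d).
 */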
static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
	struct nvt_dev *nvt = dev->priv;
	u16 val;

	if (carrier == 0)
		return -EINVAL;

	nvt_cir_reg_write(nvt, 1, CIR_CP);
	val = 3000000 / (carrier) - 1;
	nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);

	nvt_dbg("cp: 0x%x cc: 0x%x\n",
		nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));

	return 0;
}

/*
 * nvt_tx_ir
 *
 * 1) clean TX fifo first (handled by AP)
 * 2) copy data from user space
 * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
 * 4) send 9 packets to TX FIFO to open TTR
 * in interrupt_handler:
 * 5) send all data out
 * go back to write():
 * 6) disable TX interrupts, re-enable RX interrupts
 *
 * The key problem of this function is that the user space data may be larger
 * than the driver's data buffer length. So nvt_tx_ir() will only copy
 * TX_BUF_LEN data to the buffer, and keep the current copied data buffer
 * position in cur_buf_num. But the driver's buffer count may also be larger
 * than TXFCONT (0xff). So in the interrupt handler, it has to set TXFCONT
 * to 0xff until buf_count is less than 0xff.
 */
static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	unsigned int i;
	u8 iren;
	int ret;

	spin_lock_irqsave(&nvt->tx.lock, flags);

	ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
	nvt->tx.buf_count = (ret * sizeof(unsigned));

	memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);

	nvt->tx.cur_buf_num = 0;

	/* save currently enabled interrupts */
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* now disable all interrupts, save TFU & TTR */
	nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);

	nvt->tx.tx_state = ST_TX_REPLY;

	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
			  CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);

	/* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
	for (i = 0; i < 9; i++)
		nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);

	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);

	spin_lock_irqsave(&nvt->tx.lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	/* restore enabled interrupts to prior state */
	nvt_cir_reg_write(nvt, iren, CIR_IREN);

	return ret;
}

/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
	int i;

	printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
	for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
		printk(KERN_CONT "0x%02x ", nvt->buf[i]);
	printk(KERN_CONT "\n");
}

/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
 * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time, we trigger a decode operation.
 */
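/*
 * For example, a sample byte of 0x85 has the msb set (pulse) and a count of
 * 0x05, i.e. a pulse lasting 5 * 50us = 250us at the default sample period.
 */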
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	DEFINE_IR_RAW_EVENT(rawir);
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);

	init_ir_raw_event(&rawir);

	for (i = 0; i < nvt->pkts; i++) {
		sample = nvt->buf[i];

		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
					  * SAMPLE_PERIOD);

		nvt_dbg("Storing %s with duration %d",
			rawir.pulse ? "pulse" : "space", rawir.duration);

		ir_raw_event_store_with_filter(nvt->rdev, &rawir);

		/*
		 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
		 * indicates end of IR signal, but new data incoming. In both
		 * cases, it means we're ready to call ir_raw_event_handle
		 */
		if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
			nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
			ir_raw_event_handle(nvt->rdev);
		}
	}

	nvt->pkts = 0;

	nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
	ir_raw_event_handle(nvt->rdev);

	nvt_dbg_verbose("%s done", __func__);
}

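/*
 * The hardware RX FIFO overran: drop any buffered packets, clear the FIFO
 * and reset the raw IR event decoder state.
 */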
static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
	nvt_pr(KERN_WARNING, "RX FIFO overrun detected, flushing data!");

	nvt->pkts = 0;
	nvt_clear_cir_fifo(nvt);
	ir_raw_event_reset(nvt->rdev);
}

/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
	unsigned long flags;
	u8 fifocount, val;
	unsigned int b_idx;
	bool overrun = false;
	int i;

	/* Get count of how many bytes to read from RX FIFO */
	fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
	/* if we get 0xff, probably means the logical dev is disabled */
	if (fifocount == 0xff)
		return;
	/* watch out for a fifo overrun condition */
	else if (fifocount > RX_BUF_LEN) {
		overrun = true;
		fifocount = RX_BUF_LEN;
	}

	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	b_idx = nvt->pkts;

	/* This should never happen, but let's check anyway... */
	if (b_idx + fifocount > RX_BUF_LEN) {
		nvt_process_rx_ir_data(nvt);
		b_idx = 0;
	}

	/* Read fifocount bytes from CIR Sample RX FIFO register */
	for (i = 0; i < fifocount; i++) {
		val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
		nvt->buf[b_idx + i] = val;
	}

	nvt->pkts += fifocount;
	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

	nvt_process_rx_ir_data(nvt);

	if (overrun)
		nvt_handle_rx_fifo_overrun(nvt);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}

static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	nvt_pr(KERN_INFO, "IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR ? " RDR" : "",
		status & CIR_IRSTS_RTR ? " RTR" : "",
		status & CIR_IRSTS_PE ? " PE" : "",
		status & CIR_IRSTS_RFO ? " RFO" : "",
		status & CIR_IRSTS_TE ? " TE" : "",
		status & CIR_IRSTS_TTR ? " TTR" : "",
		status & CIR_IRSTS_TFU ? " TFU" : "",
		status & CIR_IRSTS_GH ? " GH" : "",
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}

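/* return true when no IR transmit is currently in progress (tx state is ST_TX_NONE) */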
static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
{
	unsigned long flags;
	bool tx_inactive;
	u8 tx_state;

	spin_lock_irqsave(&nvt->tx.lock, flags);
	tx_state = nvt->tx.tx_state;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	tx_inactive = (tx_state == ST_TX_NONE);

	return tx_inactive;
}

/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren, cur_state;
	unsigned long flags;

	nvt_dbg_verbose("%s firing", __func__);

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_efm_disable(nvt);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name      - description
	 *   7: CIR_IRSTS_RDR - RX Data Ready
	 *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
	 *   5: CIR_IRSTS_PE  - Packet End
	 *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
	 *   3: CIR_IRSTS_TE  - TX FIFO Empty
	 *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
	 *   1: CIR_IRSTS_TFU - TX FIFO Underrun
	 *   0: CIR_IRSTS_GH  - Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	if (!status) {
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
		return IRQ_RETVAL(IRQ_NONE);
	}

	/* ack/clear all irq flags we've got */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	/* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
	iren = nvt_cir_reg_read(nvt, CIR_IREN);
	if (!iren) {
		nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
		return IRQ_RETVAL(IRQ_NONE);
	}

	if (debug)
		nvt_cir_log_irqs(status, iren);

	if (status & CIR_IRSTS_RTR) {
		/* FIXME: add code for study/learn mode */
		/* We only do rx if not tx'ing */
		if (nvt_cir_tx_inactive(nvt))
			nvt_get_rx_ir_data(nvt);
	}

	if (status & CIR_IRSTS_PE) {
		if (nvt_cir_tx_inactive(nvt))
			nvt_get_rx_ir_data(nvt);

		spin_lock_irqsave(&nvt->nvt_lock, flags);

		cur_state = nvt->study_state;

		spin_unlock_irqrestore(&nvt->nvt_lock, flags);

		if (cur_state == ST_STUDY_NONE)
			nvt_clear_cir_fifo(nvt);
	}

	if (status & CIR_IRSTS_TE)
		nvt_clear_tx_fifo(nvt);

	if (status & CIR_IRSTS_TTR) {
		unsigned int pos, count;
		u8 tmp;

		spin_lock_irqsave(&nvt->tx.lock, flags);

		pos = nvt->tx.cur_buf_num;
		count = nvt->tx.buf_count;

		/* Write data into the hardware tx fifo while pos < count */
		if (pos < count) {
			nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
			nvt->tx.cur_buf_num++;
		/* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
		} else {
			tmp = nvt_cir_reg_read(nvt, CIR_IREN);
			nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
		}

		spin_unlock_irqrestore(&nvt->tx.lock, flags);
	}

	if (status & CIR_IRSTS_TFU) {
		spin_lock_irqsave(&nvt->tx.lock, flags);
		if (nvt->tx.tx_state == ST_TX_REPLY) {
			nvt->tx.tx_state = ST_TX_REQUEST;
			wake_up(&nvt->tx.queue);
		}
		spin_unlock_irqrestore(&nvt->tx.lock, flags);
	}

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_RETVAL(IRQ_HANDLED);
}

/* Interrupt service routine for CIR Wake */
static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
{
	u8 status, iren, val;
	struct nvt_dev *nvt = data;
	unsigned long flags;

	nvt_dbg_wake("%s firing", __func__);

	status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
	if (!status)
		return IRQ_RETVAL(IRQ_NONE);

	if (status & CIR_WAKE_IRSTS_IR_PENDING)
		nvt_clear_cir_wake_fifo(nvt);

	nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);

	/* Interrupt may be shared with CIR, bail if Wake not enabled */
	iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
	if (!iren) {
		nvt_dbg_wake("%s exiting, wake not enabled", __func__);
		return IRQ_RETVAL(IRQ_HANDLED);
	}

	if ((status & CIR_WAKE_IRSTS_PE) &&
	    (nvt->wake_state == ST_WAKE_START)) {
		while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
			val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
			nvt_dbg("setting wake up key: 0x%x", val);
		}

		nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
		spin_lock_irqsave(&nvt->nvt_lock, flags);
		nvt->wake_state = ST_WAKE_FINISH;
		spin_unlock_irqrestore(&nvt->nvt_lock, flags);
	}

	nvt_dbg_wake("%s done", __func__);
	return IRQ_RETVAL(IRQ_HANDLED);
}

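/* enable the CIR logical device, clear pending interrupts and re-enable the CIR interrupt sources */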
static void nvt_enable_cir(struct nvt_dev *nvt)
{
	/* set function enable flags */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	nvt_efm_enable(nvt);

	/* enable the CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	/* clear all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_set_cir_iren(nvt);
}

static void nvt_disable_cir(struct nvt_dev *nvt)
{
	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	nvt_efm_enable(nvt);

	/* disable the CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);
}

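/* rc_dev open/close callbacks: enable/disable the CIR hardware */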
static int nvt_open(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt_enable_cir(nvt);
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	return 0;
}

static void nvt_close(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt_disable_cir(nvt);
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}

/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
	struct nvt_dev *nvt;
	struct rc_dev *rdev;
	int ret = -ENOMEM;

	nvt = kzalloc(sizeof(struct nvt_dev), GFP_KERNEL);
	if (!nvt)
		return ret;

	/* input device for IR remote (and tx) */
	rdev = rc_allocate_device();
	if (!rdev)
		goto exit_free_dev_rdev;

	ret = -ENODEV;

	/* activate pnp device */
	if (pnp_activate_dev(pdev) < 0) {
		dev_err(&pdev->dev, "Could not activate PNP device!\n");
		goto exit_free_dev_rdev;
	}

	/* validate pnp resources */
	if (!pnp_port_valid(pdev, 0) ||
	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
		goto exit_free_dev_rdev;
	}

	if (!pnp_irq_valid(pdev, 0)) {
		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
		goto exit_free_dev_rdev;
	}

	if (!pnp_port_valid(pdev, 1) ||
	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
		goto exit_free_dev_rdev;
	}

	nvt->cir_addr = pnp_port_start(pdev, 0);
	nvt->cir_irq = pnp_irq(pdev, 0);

	nvt->cir_wake_addr = pnp_port_start(pdev, 1);
	/* irq is always shared between cir and cir wake */
	nvt->cir_wake_irq = nvt->cir_irq;

	nvt->cr_efir = CR_EFIR;
	nvt->cr_efdr = CR_EFDR;

	spin_lock_init(&nvt->nvt_lock);
	spin_lock_init(&nvt->tx.lock);

	pnp_set_drvdata(pdev, nvt);
	nvt->pdev = pdev;

	init_waitqueue_head(&nvt->tx.queue);

	ret = nvt_hw_detect(nvt);
	if (ret)
		goto exit_free_dev_rdev;

	/* Initialize CIR & CIR Wake Logical Devices */
	nvt_efm_enable(nvt);
	nvt_cir_ldev_init(nvt);
	nvt_cir_wake_ldev_init(nvt);
	nvt_efm_disable(nvt);

	/* Initialize CIR & CIR Wake Config Registers */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	/* Set up the rc device */
	rdev->priv = nvt;
	rdev->driver_type = RC_DRIVER_IR_RAW;
	rdev->allowed_protocols = RC_BIT_ALL;
	rdev->open = nvt_open;
	rdev->close = nvt_close;
	rdev->tx_ir = nvt_tx_ir;
	rdev->s_tx_carrier = nvt_set_tx_carrier;
	rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
	rdev->input_phys = "nuvoton/cir0";
	rdev->input_id.bustype = BUS_HOST;
	rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
	rdev->input_id.product = nvt->chip_major;
	rdev->input_id.version = nvt->chip_minor;
	rdev->dev.parent = &pdev->dev;
	rdev->driver_name = NVT_DRIVER_NAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->timeout = MS_TO_NS(100);
	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
	rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
#if 0
	rdev->min_timeout = XYZ;
	rdev->max_timeout = XYZ;
	/* tx bits */
	rdev->tx_resolution = XYZ;
#endif
	nvt->rdev = rdev;

	ret = rc_register_device(rdev);
	if (ret)
		goto exit_free_dev_rdev;

	ret = -EBUSY;
	/* now claim resources */
	if (!request_region(nvt->cir_addr,
			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		goto exit_unregister_device;

	if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
			NVT_DRIVER_NAME, (void *)nvt))
		goto exit_release_cir_addr;

	if (!request_region(nvt->cir_wake_addr,
			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		goto exit_free_irq;

	if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
			NVT_DRIVER_NAME, (void *)nvt))
		goto exit_release_cir_wake_addr;

	device_init_wakeup(&pdev->dev, true);

	nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n");
	if (debug) {
		cir_dump_regs(nvt);
		cir_wake_dump_regs(nvt);
	}

	return 0;

exit_release_cir_wake_addr:
	release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
exit_free_irq:
	free_irq(nvt->cir_irq, nvt);
exit_release_cir_addr:
	release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
exit_unregister_device:
	rc_unregister_device(rdev);
	rdev = NULL;
exit_free_dev_rdev:
	rc_free_device(rdev);
	kfree(nvt);

	return ret;
}

static void nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	/* disable CIR */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);
	nvt_disable_cir(nvt);
	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	/* free resources */
	free_irq(nvt->cir_irq, nvt);
	free_irq(nvt->cir_wake_irq, nvt);
	release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
	release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);

	rc_unregister_device(nvt->rdev);

	kfree(nvt);
}

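/*
 * On suspend, reset the study/wake/tx state, mask all CIR interrupts and
 * disable the CIR logical device, leaving only the wake hardware armed.
 */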
static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	unsigned long flags;

	nvt_dbg("%s called", __func__);

	/* zero out misc state tracking */
	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt->study_state = ST_STUDY_NONE;
	nvt->wake_state = ST_WAKE_NONE;
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	spin_lock_irqsave(&nvt->tx.lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	/* disable all CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	nvt_efm_enable(nvt);

	/* disable cir logical dev */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}

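/*
 * On resume, re-enable CIR interrupts and the CIR logical device, then
 * reinitialize the CIR and CIR Wake registers.
 */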
static int nvt_resume(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	/* open interrupt */
	nvt_set_cir_iren(nvt);

	/* Enable CIR logical device */
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	return 0;
}

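/* On shutdown, leave CIR Wake armed so an IR remote can power the system back on */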
static void nvt_shutdown(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_enable_wake(nvt);
}

static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
	{ "NTN0530", 0 },   /* CIR for new chip's pnp id */
	{ "", 0 },
};

static struct pnp_driver nvt_driver = {
	.name = NVT_DRIVER_NAME,
	.id_table = nvt_ids,
	.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe = nvt_probe,
	.remove = nvt_remove,
	.suspend = nvt_suspend,
	.resume = nvt_resume,
	.shutdown = nvt_shutdown,
};

module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");

MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");

MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

module_pnp_driver(nvt_driver);