lx_core.c

/* -*- linux-c -*- *
 *
 * ALSA driver for the digigram lx6464es interface
 * low-level interface
 *
 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

/* #define RMH_DEBUG 1 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include "lx6464es.h"
#include "lx_core.h"

/* low-level register access */

static const unsigned long dsp_port_offsets[] = {
        0,
        0x400,
        0x401,
        0x402,
        0x403,
        0x404,
        0x405,
        0x406,
        0x407,
        0x408,
        0x409,
        0x40a,
        0x40b,
        0x40c,
        0x410,
        0x411,
        0x412,
        0x413,
        0x414,
        0x415,
        0x416,
        0x420,
        0x430,
        0x431,
        0x432,
        0x433,
        0x434,
        0x440
};
static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_dsp_bar;
        return base_address + dsp_port_offsets[port]*4;
}

unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_dsp_register(chip, port);
        return ioread32(address);
}

static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
                               u32 len)
{
        u32 __iomem *address = lx_dsp_register(chip, port);
        int i;

        /* we cannot use memcpy_fromio */
        for (i = 0; i != len; ++i)
                data[i] = ioread32(address + i);
}

void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
{
        void __iomem *address = lx_dsp_register(chip, port);
        iowrite32(data, address);
}

static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
                                const u32 *data, u32 len)
{
        u32 __iomem *address = lx_dsp_register(chip, port);
        int i;

        /* we cannot use memcpy_toio */
        for (i = 0; i != len; ++i)
                iowrite32(data[i], address + i);
}
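
/*
 * Note on addressing: dsp_port_offsets[] above are indices of 32-bit words
 * inside the DSP BAR, hence the "* 4" in lx_dsp_register(), whereas the
 * plx_port_offsets[] below are already byte offsets into the remapped PLX
 * region. For example, lx_dsp_reg_read(chip, eReg_CSM) reads the 32-bit
 * word at port_dsp_bar + dsp_port_offsets[eReg_CSM] * 4.
 */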
static const unsigned long plx_port_offsets[] = {
        0x04,
        0x40,
        0x44,
        0x48,
        0x4c,
        0x50,
        0x54,
        0x58,
        0x5c,
        0x64,
        0x68,
        0x6C
};

static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_plx_remapped;
        return base_address + plx_port_offsets[port];
}

unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_plx_register(chip, port);
        return ioread32(address);
}

void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
{
        void __iomem *address = lx_plx_register(chip, port);
        iowrite32(data, address);
}

/* rmh */

#ifdef CONFIG_SND_DEBUG
#define CMD_NAME(a) a
#else
#define CMD_NAME(a) NULL
#endif

#define Reg_CSM_MR      0x00000002
#define Reg_CSM_MC      0x00000001

struct dsp_cmd_info {
        u32  dcCodeOp;          /* Op Code of the command (usually 1st 24-bits
                                 * word).*/
        u16  dcCmdLength;       /* Command length in words of 24 bits.*/
        u16  dcStatusType;      /* Status type: 0 for fixed length, 1 for
                                 * random. */
        u16  dcStatusLength;    /* Status length (if fixed).*/
        char *dcOpName;
};

/*
  Initialization and control data for the Microblaze interface
  - OpCode:
    the opcode field of the command set at the proper offset
  - CmdLength
    the number of command words
  - StatusType
    offset in the status registers: 0 means that the return value may be
    different from 0, and must be read
  - StatusLength
    the number of status words (in addition to the return value)
*/

static struct dsp_cmd_info dsp_commands[] =
{
        { (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/
          , 1 , 0 /**/ , CMD_NAME("INFO_DEBUG") },
        { (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /**/ , CMD_NAME("GET_SYS_CFG") },
        { (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/ , CMD_NAME("SET_GRANULARITY") },
        { (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/ , CMD_NAME("SET_TIMER_IRQ") },
        { (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /*up to 10*/ , CMD_NAME("GET_EVENT") },
        { (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /*up to 4*/ , CMD_NAME("GET_PIPES") },
        { (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /**/ , CMD_NAME("ALLOCATE_PIPE") },
        { (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /**/ , CMD_NAME("RELEASE_PIPE") },
        { (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/
          , 1 , MAX_STREAM_BUFFER , CMD_NAME("ASK_BUFFERS") },
        { (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/
          , 0 , 0 /*up to 2*/ , CMD_NAME("STOP_PIPE") },
        { (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /*up to 2*/ , CMD_NAME("GET_PIPE_SPL_COUNT") },
        { (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/
          , 1 , 0 /**/ , CMD_NAME("TOGGLE_PIPE_STATE") },
        { (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/
          , 1 , 0 /**/ , CMD_NAME("DEF_STREAM") },
        { (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/
          , 1 , 0 /**/ , CMD_NAME("SET_MUTE") },
        { (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1 /**/
          , 1 , 2 /**/ , CMD_NAME("GET_STREAM_SPL_COUNT") },
        { (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/
          , 0 , 1 /**/ , CMD_NAME("UPDATE_BUFFER") },
        { (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/
          , 1 , 4 /**/ , CMD_NAME("GET_BUFFER") },
        { (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /*up to 4*/ , CMD_NAME("CANCEL_BUFFER") },
        { (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/
          , 1 , 1 /**/ , CMD_NAME("GET_PEAK") },
        { (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/
          , 1 , 0 /**/ , CMD_NAME("SET_STREAM_STATE") },
};

static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
{
        snd_BUG_ON(cmd >= CMD_14_INVALID);

        rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
        rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
        rmh->stat_len = dsp_commands[cmd].dcStatusLength;
        rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
        rmh->cmd_idx = cmd;
        memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));

#ifdef CONFIG_SND_DEBUG
        memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
#endif
#ifdef RMH_DEBUG
        rmh->cmd_idx = cmd;
#endif
}

#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
static void lx_message_dump(struct lx_rmh *rmh)
{
        u8 idx = rmh->cmd_idx;
        int i;

        snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

        for (i = 0; i != rmh->cmd_len; ++i)
                snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

        for (i = 0; i != rmh->stat_len; ++i)
                snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
        snd_printk("\n");
}
#else
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif
/* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */
#define XILINX_TIMEOUT_MS       40
#define XILINX_POLL_NO_SLEEP    100
#define XILINX_POLL_ITERATIONS  150

static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
{
        u32 reg = ED_DSP_TIMED_OUT;
        int dwloop;

        if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
                dev_err(chip->card->dev, "PIOSendMessage eReg_CSM %x\n", reg);
                return -EBUSY;
        }

        /* write command */
        lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

        /* MicroBlaze gogogo */
        lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

        /* wait for device to answer */
        for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
                if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
                        if (rmh->dsp_stat == 0)
                                reg = lx_dsp_reg_read(chip, eReg_CRM1);
                        else
                                reg = 0;
                        goto polling_successful;
                } else
                        udelay(1);
        }
        dev_warn(chip->card->dev,
                 "TIMEOUT lx_message_send_atomic! polling failed\n");

polling_successful:
        if ((reg & ERROR_VALUE) == 0) {
                /* read response */
                if (rmh->stat_len) {
                        snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
                        lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
                                           rmh->stat_len);
                }
        } else
                dev_err(chip->card->dev, "rmh error: %08x\n", reg);

        /* clear Reg_CSM_MR */
        lx_dsp_reg_write(chip, eReg_CSM, 0);

        switch (reg) {
        case ED_DSP_TIMED_OUT:
                dev_warn(chip->card->dev, "lx_message_send: dsp timeout\n");
                return -ETIMEDOUT;

        case ED_DSP_CRASHED:
                dev_warn(chip->card->dev, "lx_message_send: dsp crashed\n");
                return -EAGAIN;
        }

        lx_message_dump(rmh);

        return reg;
}
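
/*
 * Typical round-trip through the command mailbox, as used by all helpers
 * below (illustrative sketch only; CMD_xx and the parameter OR-ed into
 * cmd[0] depend on the actual command):
 *
 *      mutex_lock(&chip->msg_lock);
 *      lx_message_init(&chip->rmh, CMD_xx);
 *      chip->rmh.cmd[0] |= some_parameter;
 *      err = lx_message_send_atomic(chip, &chip->rmh);
 *      if (err == 0)
 *              result = chip->rmh.stat[0];
 *      mutex_unlock(&chip->msg_lock);
 */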
/* low-level dsp access */
int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
        u16 ret;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        *rdsp_version = chip->rmh.stat[1];
        mutex_unlock(&chip->msg_lock);
        return ret;
}

int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
{
        u16 ret = 0;
        u32 freq_raw = 0;
        u32 freq = 0;
        u32 frequency = 0;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (ret == 0) {
                freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
                freq = freq_raw & XES_FREQ_COUNT8_MASK;

                if ((freq < XES_FREQ_COUNT8_48_MAX) ||
                    (freq > XES_FREQ_COUNT8_44_MIN))
                        frequency = 0; /* unknown */
                else if (freq >= XES_FREQ_COUNT8_44_MAX)
                        frequency = 44100;
                else
                        frequency = 48000;
        }

        mutex_unlock(&chip->msg_lock);

        *rfreq = frequency * chip->freq_ratio;

        return ret;
}
int lx_dsp_get_mac(struct lx6464es *chip)
{
        u32 macmsb, maclsb;

        macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
        maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;

        /* todo: endianness handling */
        chip->mac_address[5] = ((u8 *)(&maclsb))[0];
        chip->mac_address[4] = ((u8 *)(&maclsb))[1];
        chip->mac_address[3] = ((u8 *)(&maclsb))[2];
        chip->mac_address[2] = ((u8 *)(&macmsb))[0];
        chip->mac_address[1] = ((u8 *)(&macmsb))[1];
        chip->mac_address[0] = ((u8 *)(&macmsb))[2];

        return 0;
}
int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
{
        int ret;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
        chip->rmh.cmd[0] |= gran;

        ret = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);
        return ret;
}

int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
{
        int ret;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
        chip->rmh.stat_len = 9; /* we don't necessarily need the full length */

        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (!ret)
                memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));

        mutex_unlock(&chip->msg_lock);
        return ret;
}

#define PIPE_INFO_TO_CMD(capture, pipe)                                 \
        ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)
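
/*
 * For example, PIPE_INFO_TO_CMD(1, 0) builds the command-word bits for
 * capture pipe 0: (0 | ID_IS_CAPTURE) shifted left by ID_OFFSET, ready
 * to be OR-ed into rmh.cmd[0] by the pipe/stream/buffer helpers below.
 */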

/* low-level pipe handling */
int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
                     int channels)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= channels;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        if (err != 0)
                dev_err(chip->card->dev, "could not allocate pipe\n");

        return err;
}

int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}
int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
                  u32 *r_needed, u32 *r_freed, u32 *size_array)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

#ifdef CONFIG_SND_DEBUG
        if (size_array)
                memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
#endif

        *r_needed = 0;
        *r_freed = 0;

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (!err) {
                int i;
                for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
                        u32 stat = chip->rmh.stat[i];
                        if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
                                /* finished */
                                *r_freed += 1;
                                if (size_array)
                                        size_array[i] = stat & MASK_DATA_SIZE;
                        } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
                                   == 0)
                                /* free */
                                *r_needed += 1;
                }

                dev_dbg(chip->card->dev,
                        "CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
                        *r_needed, *r_freed);
                for (i = 0; i < MAX_STREAM_BUFFER && i != chip->rmh.stat_len;
                     ++i)
                        dev_dbg(chip->card->dev, "  stat[%d]: %x, %x\n", i,
                                chip->rmh.stat[i],
                                chip->rmh.stat[i] & MASK_DATA_SIZE);
        }

        mutex_unlock(&chip->msg_lock);
        return err;
}
int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}

static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}

int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;

        err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}

int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err = 0;

        err = lx_pipe_wait_for_start(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}

int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
                         u64 *rsample_count)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.stat_len = 2; /* need all words here! */

        err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! */

        if (err != 0)
                dev_err(chip->card->dev,
                        "could not query pipe's sample count\n");
        else {
                *rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                                  << 24)        /* hi part */
                        + chip->rmh.stat[1];    /* lo part */
        }

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err != 0)
                dev_err(chip->card->dev, "could not query pipe's state\n");
        else
                *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;

        mutex_unlock(&chip->msg_lock);
        return err;
}
static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
                                  int is_capture, u16 state)
{
        int i;

        /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms:
         * timeout 50 ms */
        for (i = 0; i != 50; ++i) {
                u16 current_state;
                int err = lx_pipe_state(chip, pipe, is_capture, &current_state);

                if (err < 0)
                        return err;

                if (current_state == state)
                        return 0;

                mdelay(1);
        }

        return -ETIMEDOUT;
}

int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}

int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}

/* low-level stream handling */
int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
                        int is_capture, enum stream_state_t state)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= state;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}

int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
                         u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
        u32 channels = runtime->channels;

        if (runtime->channels != channels)
                dev_err(chip->card->dev, "channel count mismatch: %d vs %d",
                        runtime->channels, channels);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);

        chip->rmh.cmd[0] |= pipe_cmd;

        if (runtime->sample_bits == 16)
                /* 16 bit format */
                chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);

        if (snd_pcm_format_little_endian(runtime->format))
                /* little endian/intel format */
                chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);

        chip->rmh.cmd[0] |= channels-1;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}
int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
                    int *rstate)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
                              u64 *r_bytepos)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                      << 32)            /* hi part */
                + chip->rmh.stat[1];    /* lo part */

        mutex_unlock(&chip->msg_lock);
        return err;
}

/* low-level buffer handling */
int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
                   u32 *r_buffer_index)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */

        /* todo: pause request, circular buffer */

        chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
        chip->rmh.cmd[2] = buf_address_lo;

        if (buf_address_hi) {
                chip->rmh.cmd_len = 4;
                chip->rmh.cmd[3] = buf_address_hi;
                chip->rmh.cmd[0] |= BF_64BITS_ADR;
        }

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0) {
                *r_buffer_index = chip->rmh.stat[0];
                goto done;
        }

        if (err == EB_RBUFFERS_TABLE_OVERFLOW)
                dev_err(chip->card->dev,
                        "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");

        if (err == EB_INVALID_STREAM)
                dev_err(chip->card->dev,
                        "lx_buffer_give EB_INVALID_STREAM\n");

        if (err == EB_CMD_REFUSED)
                dev_err(chip->card->dev,
                        "lx_buffer_give EB_CMD_REFUSED\n");

done:
        mutex_unlock(&chip->msg_lock);
        return err;
}
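
/*
 * Typical caller pattern (see lx_interrupt_request_new_buffer() below):
 * the DMA address of a period is split with unpack_pointer() and handed
 * to lx_buffer_give(); a non-zero high word switches the command to the
 * 4-word, BF_64BITS_ADR form. Illustrative sketch only:
 *
 *      u32 lo = 0, hi = 0, index = 0;
 *      unpack_pointer(dma_addr, &lo, &hi);
 *      err = lx_buffer_give(chip, 0, is_capture, period_bytes, lo, hi,
 *                           &index);
 */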
int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 *r_buffer_size)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the
                                             * microblaze will seek for it */

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0)
                *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
                     u32 buffer_index)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= buffer_index;

        err = lx_message_send_atomic(chip, &chip->rmh);

        mutex_unlock(&chip->msg_lock);
        return err;
}

/* low-level gain/peak handling
 *
 * \todo: can we unmute capture/playback channels independently?
 *
 * */
int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
{
        int err;
        /* bit set to 1: channel muted */
        u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);

        chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);

        chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32);        /* hi part */
        chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */

        dev_dbg(chip->card->dev,
                "mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
                chip->rmh.cmd[2]);

        err = lx_message_send_atomic(chip, &chip->rmh);

        mutex_unlock(&chip->msg_lock);
        return err;
}

static u32 peak_map[] = {
        0x00000109, /* -90.308dB */
        0x0000083B, /* -72.247dB */
        0x000020C4, /* -60.205dB */
        0x00008273, /* -48.030dB */
        0x00020756, /* -36.005dB */
        0x00040C37, /* -30.001dB */
        0x00081385, /* -24.002dB */
        0x00101D3F, /* -18.000dB */
        0x0016C310, /* -15.000dB */
        0x002026F2, /* -12.001dB */
        0x002D6A86, /* -9.000dB */
        0x004026E6, /* -6.004dB */
        0x005A9DF6, /* -3.000dB */
        0x0065AC8B, /* -2.000dB */
        0x00721481, /* -1.000dB */
        0x007FFFFF, /* FS */
};

int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
                   u32 *r_levels)
{
        int err = 0;
        int i;

        mutex_lock(&chip->msg_lock);
        for (i = 0; i < channels; i += 4) {
                u32 s0, s1, s2, s3;

                lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
                chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);

                err = lx_message_send_atomic(chip, &chip->rmh);

                if (err == 0) {
                        s0 = peak_map[chip->rmh.stat[0] & 0x0F];
                        s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf];
                        s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf];
                        s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
                } else
                        s0 = s1 = s2 = s3 = 0;

                r_levels[0] = s0;
                r_levels[1] = s1;
                r_levels[2] = s2;
                r_levels[3] = s3;

                r_levels += 4;
        }

        mutex_unlock(&chip->msg_lock);
        return err;
}
/* interrupt handling */
#define PCX_IRQ_NONE 0

#define IRQCS_ACTIVE_PCIDB      BIT(13)
#define IRQCS_ENABLE_PCIIRQ     BIT(8)
#define IRQCS_ENABLE_PCIDB      BIT(9)

static u32 lx_interrupt_test_ack(struct lx6464es *chip)
{
        u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);

        /* Test if PCI Doorbell interrupt is active */
        if (irqcs & IRQCS_ACTIVE_PCIDB) {
                u32 temp;
                irqcs = PCX_IRQ_NONE;

                while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
                        /* RAZ interrupt */
                        irqcs |= temp;
                        lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
                }

                return irqcs;
        }
        return PCX_IRQ_NONE;
}

static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
                            int *r_async_pending, int *r_async_escmd)
{
        u32 irq_async;
        u32 irqsrc = lx_interrupt_test_ack(chip);

        if (irqsrc == PCX_IRQ_NONE)
                return 0;

        *r_irqsrc = irqsrc;

        irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response
                                                     * (set by xilinx) + EOB */

        if (irq_async & MASK_SYS_STATUS_ESA) {
                irq_async &= ~MASK_SYS_STATUS_ESA;
                *r_async_escmd = 1;
        }

        if (irq_async) {
                /* dev_dbg(chip->card->dev, "interrupt: async event pending\n"); */
                *r_async_pending = 1;
        }

        return 1;
}

static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
                                            int *r_freq_changed,
                                            u64 *r_notified_in_pipe_mask,
                                            u64 *r_notified_out_pipe_mask)
{
        int err;
        u32 stat[9];    /* answer from CMD_04_GET_EVENT */

        /* We can optimize this to not read dumb events.
         * Answer words are in the following order:
         * Stat[0]      general status
         * Stat[1]      end of buffer OUT pF
         * Stat[2]      end of buffer OUT pf
         * Stat[3]      end of buffer IN pF
         * Stat[4]      end of buffer IN pf
         * Stat[5]      MSB underrun
         * Stat[6]      LSB underrun
         * Stat[7]      MSB overrun
         * Stat[8]      LSB overrun
         * */

        u64 orun_mask;
        u64 urun_mask;
        int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
        int eb_pending_in  = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

        *r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

        err = lx_dsp_read_async_events(chip, stat);
        if (err < 0)
                return err;

        if (eb_pending_in) {
                *r_notified_in_pipe_mask = ((u64)stat[3] << 32)
                        + stat[4];
                dev_dbg(chip->card->dev, "interrupt: EOBI pending %llx\n",
                        *r_notified_in_pipe_mask);
        }
        if (eb_pending_out) {
                *r_notified_out_pipe_mask = ((u64)stat[1] << 32)
                        + stat[2];
                dev_dbg(chip->card->dev, "interrupt: EOBO pending %llx\n",
                        *r_notified_out_pipe_mask);
        }

        orun_mask = ((u64)stat[7] << 32) + stat[8];
        urun_mask = ((u64)stat[5] << 32) + stat[6];

        /* todo: handle xrun notification */

        return err;
}
static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
                                           struct lx_stream *lx_stream)
{
        struct snd_pcm_substream *substream = lx_stream->stream;
        const unsigned int is_capture = lx_stream->is_capture;
        int err;

        const u32 channels = substream->runtime->channels;
        const u32 bytes_per_frame = channels * 3;
        const u32 period_size = substream->runtime->period_size;
        const u32 period_bytes = period_size * bytes_per_frame;
        const u32 pos = lx_stream->frame_pos;
        const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
                0 : pos + 1;

        dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
        u32 buf_hi = 0;
        u32 buf_lo = 0;
        u32 buffer_index = 0;

        u32 needed, freed;
        u32 size_array[MAX_STREAM_BUFFER];

        dev_dbg(chip->card->dev, "->lx_interrupt_request_new_buffer\n");

        mutex_lock(&chip->lock);

        err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
        dev_dbg(chip->card->dev,
                "interrupt: needed %d, freed %d\n", needed, freed);

        unpack_pointer(buf, &buf_lo, &buf_hi);
        err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
                             &buffer_index);
        dev_dbg(chip->card->dev,
                "interrupt: gave buffer index %x on 0x%lx (%d bytes)\n",
                buffer_index, (unsigned long)buf, period_bytes);

        lx_stream->frame_pos = next_pos;
        mutex_unlock(&chip->lock);

        return err;
}

irqreturn_t lx_interrupt(int irq, void *dev_id)
{
        struct lx6464es *chip = dev_id;
        int async_pending, async_escmd;
        u32 irqsrc;
        bool wake_thread = false;

        dev_dbg(chip->card->dev,
                "**************************************************\n");

        if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
                dev_dbg(chip->card->dev, "IRQ_NONE\n");
                return IRQ_NONE; /* this device did not cause the interrupt */
        }

        if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
                return IRQ_HANDLED;

        if (irqsrc & MASK_SYS_STATUS_EOBI)
                dev_dbg(chip->card->dev, "interrupt: EOBI\n");

        if (irqsrc & MASK_SYS_STATUS_EOBO)
                dev_dbg(chip->card->dev, "interrupt: EOBO\n");

        if (irqsrc & MASK_SYS_STATUS_URUN)
                dev_dbg(chip->card->dev, "interrupt: URUN\n");

        if (irqsrc & MASK_SYS_STATUS_ORUN)
                dev_dbg(chip->card->dev, "interrupt: ORUN\n");

        if (async_pending) {
                wake_thread = true;
                chip->irqsrc = irqsrc;
        }

        if (async_escmd) {
                /* backdoor for ethersound commands
                 *
                 * for now, we do not need this
                 *
                 * */
                dev_dbg(chip->card->dev, "interrupt requests escmd handling\n");
        }

        return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}
irqreturn_t lx_threaded_irq(int irq, void *dev_id)
{
        struct lx6464es *chip = dev_id;
        u64 notified_in_pipe_mask = 0;
        u64 notified_out_pipe_mask = 0;
        int freq_changed;
        int err;

        /* handle async events */
        err = lx_interrupt_handle_async_events(chip, chip->irqsrc,
                                               &freq_changed,
                                               &notified_in_pipe_mask,
                                               &notified_out_pipe_mask);
        if (err)
                dev_err(chip->card->dev, "error handling async events\n");

        if (notified_in_pipe_mask) {
                struct lx_stream *lx_stream = &chip->capture_stream;

                dev_dbg(chip->card->dev,
                        "requesting audio transfer for capture\n");
                err = lx_interrupt_request_new_buffer(chip, lx_stream);
                if (err < 0)
                        dev_err(chip->card->dev,
                                "cannot request new buffer for capture\n");
                snd_pcm_period_elapsed(lx_stream->stream);
        }

        if (notified_out_pipe_mask) {
                struct lx_stream *lx_stream = &chip->playback_stream;

                dev_dbg(chip->card->dev,
                        "requesting audio transfer for playback\n");
                err = lx_interrupt_request_new_buffer(chip, lx_stream);
                if (err < 0)
                        dev_err(chip->card->dev,
                                "cannot request new buffer for playback\n");
                snd_pcm_period_elapsed(lx_stream->stream);
        }

        return IRQ_HANDLED;
}

static void lx_irq_set(struct lx6464es *chip, int enable)
{
        u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);

        /* enable/disable interrupts
         *
         * Set the Doorbell and PCI interrupt enable bits
         *
         * */
        if (enable)
                reg |=  (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
        else
                reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);

        lx_plx_reg_write(chip, ePLX_IRQCS, reg);
}

void lx_irq_enable(struct lx6464es *chip)
{
        dev_dbg(chip->card->dev, "->lx_irq_enable\n");
        lx_irq_set(chip, 1);
}

void lx_irq_disable(struct lx6464es *chip)
{
        dev_dbg(chip->card->dev, "->lx_irq_disable\n");
        lx_irq_set(chip, 0);
}
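
/*
 * lx_interrupt() and lx_threaded_irq() above form a hard/threaded handler
 * pair: the hard handler acknowledges the doorbell and defers async-event
 * processing to the thread. A minimal registration sketch, assuming the
 * usual shared-PCI-IRQ setup in the probe code (lx6464es.c); the exact
 * flags and name are not dictated by this file:
 *
 *      err = request_threaded_irq(pci->irq, lx_interrupt, lx_threaded_irq,
 *                                 IRQF_SHARED, KBUILD_MODNAME, chip);
 */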