cmt_speech.c

/*
 * cmt_speech.c - HSI CMT speech driver
 *
 * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
 *
 * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
 * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/pm_qos.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>
#include <linux/hsi/cs-protocol.h>

#define CS_MMAP_SIZE	PAGE_SIZE

struct char_queue {
	struct list_head	list;
	u32			msg;
};
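
/*
 * Driver-wide state for the single /dev/cmt_speech character device:
 * the pending event queues handed to userspace, the mmap'ed shared
 * memory area and the HSI channel ids for the control and data channels.
 */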
struct cs_char {
	unsigned int		opened;
	struct hsi_client	*cl;
	struct cs_hsi_iface	*hi;
	struct list_head	chardev_queue;
	struct list_head	dataind_queue;
	int			dataind_pending;
	/* mmap things */
	unsigned long		mmap_base;
	unsigned long		mmap_size;
	spinlock_t		lock;
	struct fasync_struct	*async_queue;
	wait_queue_head_t	wait;
	/* hsi channel ids */
	int			channel_id_cmd;
	int			channel_id_data;
};

#define SSI_CHANNEL_STATE_READING	1
#define SSI_CHANNEL_STATE_WRITING	(1 << 1)
#define SSI_CHANNEL_STATE_POLL		(1 << 2)
#define SSI_CHANNEL_STATE_ERROR		(1 << 3)

#define TARGET_MASK			0xf000000
#define TARGET_REMOTE			(1 << CS_DOMAIN_SHIFT)
#define TARGET_LOCAL			0

/* Number of pre-allocated commands buffers */
#define CS_MAX_CMDS			4

/*
 * During data transfers, transactions must be handled
 * within 20ms (fixed value in cmtspeech HSI protocol)
 */
#define CS_QOS_LATENCY_FOR_DATA_USEC	20000

/* Timeout to wait for pending HSI transfers to complete */
#define CS_HSI_TRANSFER_TIMEOUT_MS	500

#define RX_PTR_BOUNDARY_SHIFT		8
#define RX_PTR_MAX_SHIFT		(RX_PTR_BOUNDARY_SHIFT + \
						CS_MAX_BUFFERS_SHIFT)

struct cs_hsi_iface {
	struct hsi_client		*cl;
	struct hsi_client		*master;

	unsigned int			iface_state;
	unsigned int			wakeline_state;
	unsigned int			control_state;
	unsigned int			data_state;

	/* state exposed to application */
	struct cs_mmap_config_block	*mmap_cfg;

	unsigned long			mmap_base;
	unsigned long			mmap_size;

	unsigned int			rx_slot;
	unsigned int			tx_slot;

	/* note: for security reasons, we do not trust the contents of
	 * mmap_cfg, but instead duplicate the variables here */
	unsigned int			buf_size;
	unsigned int			rx_bufs;
	unsigned int			tx_bufs;
	unsigned int			rx_ptr_boundary;
	unsigned int			rx_offsets[CS_MAX_BUFFERS];
	unsigned int			tx_offsets[CS_MAX_BUFFERS];

	/* size of aligned memory blocks */
	unsigned int			slot_size;
	unsigned int			flags;

	struct list_head		cmdqueue;

	struct hsi_msg			*data_rx_msg;
	struct hsi_msg			*data_tx_msg;
	wait_queue_head_t		datawait;

	struct pm_qos_request		pm_qos_req;

	spinlock_t			lock;
};

static struct cs_char cs_char_data;

static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);

static inline void rx_ptr_shift_too_big(void)
{
	BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX);
}
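
/*
 * Queue an event word for the character device and wake up any reader
 * blocked in poll()/read(), as well as fasync (SIGIO) listeners.
 */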
static void cs_notify(u32 message, struct list_head *head)
{
	struct char_queue *entry;

	spin_lock(&cs_char_data.lock);

	if (!cs_char_data.opened) {
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		dev_err(&cs_char_data.cl->device,
			"Can't allocate new entry for the queue.\n");
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry->msg = message;
	list_add_tail(&entry->list, head);

	spin_unlock(&cs_char_data.lock);

	wake_up_interruptible(&cs_char_data.wait);
	kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN);

out:
	return;
}

static u32 cs_pop_entry(struct list_head *head)
{
	struct char_queue *entry;
	u32 data;

	entry = list_entry(head->next, struct char_queue, list);
	data = entry->msg;
	list_del(&entry->list);
	kfree(entry);

	return data;
}

static void cs_notify_control(u32 message)
{
	cs_notify(message, &cs_char_data.chardev_queue);
}

static void cs_notify_data(u32 message, int maxlength)
{
	cs_notify(message, &cs_char_data.dataind_queue);

	spin_lock(&cs_char_data.lock);
	cs_char_data.dataind_pending++;
	while (cs_char_data.dataind_pending > maxlength &&
			!list_empty(&cs_char_data.dataind_queue)) {
		dev_dbg(&cs_char_data.cl->device, "data notification "
			"queue overrun (%u entries)\n",
			cs_char_data.dataind_pending);

		cs_pop_entry(&cs_char_data.dataind_queue);
		cs_char_data.dataind_pending--;
	}
	spin_unlock(&cs_char_data.lock);
}

static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

static inline u32 cs_get_cmd(struct hsi_msg *msg)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	return *data;
}

static void cs_release_cmd(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	list_add_tail(&msg->link, &hi->cmdqueue);
}

static void cs_cmd_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);

	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");

	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");

	if (msg->ttype == HSI_MSG_READ)
		hi->control_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else if (msg->ttype == HSI_MSG_WRITE &&
			hi->control_state & SSI_CHANNEL_STATE_WRITING)
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;

	cs_release_cmd(msg);

	spin_unlock(&hi->lock);
}

static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface* ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	msg->destructor = cs_cmd_destructor;

	return msg;
}

static void cs_free_cmds(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}
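
/*
 * Pre-allocate a small pool of single-word HSI command messages so the
 * control path never has to allocate memory in atomic context; commands
 * are claimed from and released back to hi->cmdqueue.
 */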
static int cs_alloc_cmds(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	INIT_LIST_HEAD(&hi->cmdqueue);

	for (i = 0; i < CS_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = cs_char_data.channel_id_cmd;
		msg->context = hi;
		list_add_tail(&msg->link, &hi->cmdqueue);
	}

	return 0;

out:
	cs_free_cmds(hi);
	return -ENOMEM;
}

static void cs_hsi_data_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	const char *dir = (msg->ttype == HSI_MSG_READ) ? "TX" : "RX";

	dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&cs_char_data.cl->device,
			"Data %s flush while device active\n", dir);
	if (msg->ttype == HSI_MSG_READ)
		hi->data_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;

	msg->status = HSI_STATUS_COMPLETED;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);

	spin_unlock(&hi->lock);
}

static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *txmsg, *rxmsg;
	int res = 0;

	rxmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!rxmsg) {
		res = -ENOMEM;
		goto out1;
	}
	rxmsg->channel = cs_char_data.channel_id_data;
	rxmsg->destructor = cs_hsi_data_destructor;
	rxmsg->context = hi;

	txmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!txmsg) {
		res = -ENOMEM;
		goto out2;
	}
	txmsg->channel = cs_char_data.channel_id_data;
	txmsg->destructor = cs_hsi_data_destructor;
	txmsg->context = hi;

	hi->data_rx_msg = rxmsg;
	hi->data_tx_msg = txmsg;

	return 0;

out2:
	hsi_free_msg(rxmsg);
out1:
	return res;
}

static void cs_hsi_free_data_msg(struct hsi_msg *msg)
{
	WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
		msg->status != HSI_STATUS_ERROR);
	hsi_free_msg(msg);
}

static void cs_hsi_free_data(struct cs_hsi_iface *hi)
{
	cs_hsi_free_data_msg(hi->data_rx_msg);
	cs_hsi_free_data_msg(hi->data_tx_msg);
}

static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
					struct hsi_msg *msg, const char *info,
					unsigned int *state)
{
	spin_lock(&hi->lock);
	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
		info, msg->status, *state);
}

static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
{
	spin_unlock(&hi->lock);
}

static inline void __cs_hsi_error_read_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
}

static inline void __cs_hsi_error_write_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~SSI_CHANNEL_STATE_WRITING;
}

static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_read_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_write_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
	__cs_hsi_error_read_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
	__cs_hsi_error_write_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
{
	u32 cmd = cs_get_cmd(msg);
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);
	hi->control_state &= ~SSI_CHANNEL_STATE_READING;
	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control RX error detected\n");
		/* drop hi->lock before the error helper retakes it */
		spin_unlock(&hi->lock);
		cs_hsi_control_read_error(hi, msg);
		goto out;
	}
	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
	cs_release_cmd(msg);
	if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
		struct timespec tspec;
		struct cs_timestamp *tstamp =
			&hi->mmap_cfg->tstamp_rx_ctrl;

		ktime_get_ts(&tspec);

		tstamp->tv_sec = (__u32) tspec.tv_sec;
		tstamp->tv_nsec = (__u32) tspec.tv_nsec;
	}
	spin_unlock(&hi->lock);

	cs_notify_control(cmd);

out:
	cs_hsi_read_on_control(hi);
}

static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	int ret;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control peek RX error detected\n");
		cs_hsi_control_read_error(hi, msg);
		return;
	}

	WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));

	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}
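
/*
 * Arm reception on the control channel: a zero-length "peek" read is
 * queued first, and its completion handler issues the actual one-word
 * command read (see cs_hsi_peek_on_control_complete() above).
 */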
static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_READING) {
		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "Control read error (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->control_state |= SSI_CHANNEL_STATE_READING;
	dev_dbg(&hi->cl->device, "Issuing RX on control\n");
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	msg->sgt.nents = 0;
	msg->complete = cs_hsi_peek_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

static void cs_hsi_write_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
		cs_release_cmd(msg);
		spin_unlock(&hi->lock);
	} else if (msg->status == HSI_STATUS_ERROR) {
		cs_hsi_control_write_error(hi, msg);
	} else {
		dev_err(&hi->cl->device,
			"unexpected status in control write callback %d\n",
			msg->status);
	}
}

static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		spin_unlock(&hi->lock);
		return -EIO;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device,
			"Write still pending on control channel.\n");
		spin_unlock(&hi->lock);
		return -EBUSY;
	}
	hi->control_state |= SSI_CHANNEL_STATE_WRITING;
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	cs_set_cmd(msg, message);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_write_on_control_complete;
	dev_dbg(&hi->cl->device,
		"Sending control message %08X\n", message);
	ret = hsi_async_write(hi->cl, msg);
	if (ret) {
		dev_err(&hi->cl->device,
			"async_write failed with %d\n", ret);
		cs_hsi_control_write_error(hi, msg);
	}

	/*
	 * Make sure control read is always pending when issuing
	 * new control writes. This is needed as the controller
	 * may flush our messages if e.g. the peer device reboots
	 * unexpectedly (and we cannot directly resubmit a new read from
	 * the message destructor; see cs_cmd_destructor()).
	 */
	if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
		dev_err(&hi->cl->device, "Restarting control reads\n");
		cs_hsi_read_on_control(hi);
	}

	return 0;
}

static void cs_hsi_read_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 payload;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
	hi->data_state &= ~SSI_CHANNEL_STATE_READING;
	payload = CS_RX_DATA_RECEIVED;
	payload |= hi->rx_slot;
	hi->rx_slot++;
	hi->rx_slot %= hi->rx_ptr_boundary;
	/* expose current rx ptr in mmap area */
	hi->mmap_cfg->rx_ptr = hi->rx_slot;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);
	spin_unlock(&hi->lock);

	cs_notify_data(payload, hi->rx_bufs);
	cs_hsi_read_on_data(hi);
}

static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 *address;
	int ret;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}
	if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
		dev_err(&hi->cl->device, "Data received in invalid state\n");
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
	hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
	hi->data_state |= SSI_CHANNEL_STATE_READING;
	spin_unlock(&hi->lock);

	address = (u32 *)(hi->mmap_base +
				hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
	sg_init_one(msg->sgt.sgl, address, hi->buf_size);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_data_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_data_read_error(hi, msg);
}

/*
 * Read/write transaction is ongoing. Returns false if in
 * SSI_CHANNEL_STATE_POLL state.
 */
static inline int cs_state_xfer_active(unsigned int state)
{
	return (state & SSI_CHANNEL_STATE_WRITING) ||
		(state & SSI_CHANNEL_STATE_READING);
}

/*
 * No pending read/writes
 */
static inline int cs_state_idle(unsigned int state)
{
	return !(state & ~SSI_CHANNEL_STATE_ERROR);
}
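
/*
 * Arm reception of the next downlink frame: a zero-length "peek" read
 * detects an incoming frame, and its completion handler submits the real
 * read into the next rx slot of the mmap'ed buffer area.
 */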
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *rxmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->data_state &
		(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) {
		dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
			hi->data_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->data_state |= SSI_CHANNEL_STATE_POLL;
	spin_unlock(&hi->lock);

	rxmsg = hi->data_rx_msg;
	sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
	rxmsg->sgt.nents = 0;
	rxmsg->complete = cs_hsi_peek_on_data_complete;

	ret = hsi_async_read(hi->cl, rxmsg);
	if (ret)
		cs_hsi_data_read_error(hi, rxmsg);
}

static void cs_hsi_write_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
		if (unlikely(waitqueue_active(&hi->datawait)))
			wake_up_interruptible(&hi->datawait);
		spin_unlock(&hi->lock);
	} else {
		cs_hsi_data_write_error(hi, msg);
	}
}

static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot)
{
	u32 *address;
	struct hsi_msg *txmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CONFIGURED) {
		dev_err(&hi->cl->device, "Not configured, aborting\n");
		ret = -EINVAL;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "HSI error, aborting\n");
		ret = -EIO;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device, "Write pending on data channel.\n");
		ret = -EBUSY;
		goto error;
	}
	hi->data_state |= SSI_CHANNEL_STATE_WRITING;
	spin_unlock(&hi->lock);

	hi->tx_slot = slot;
	address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
	txmsg = hi->data_tx_msg;
	sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
	txmsg->complete = cs_hsi_write_on_data_complete;
	ret = hsi_async_write(hi->cl, txmsg);
	if (ret)
		cs_hsi_data_write_error(hi, txmsg);

	return ret;

error:
	spin_unlock(&hi->lock);
	if (ret == -EIO)
		cs_hsi_data_write_error(hi, hi->data_tx_msg);

	return ret;
}

static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi)
{
	return hi->iface_state;
}
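
/*
 * Dispatch a command word written by userspace: commands addressed to the
 * remote (CMT) domain go out on the control channel, while a local
 * CS_TX_DATA_READY triggers an uplink data transfer from the given slot.
 */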
static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
{
	int ret = 0;

	local_bh_disable();
	switch (cmd & TARGET_MASK) {
	case TARGET_REMOTE:
		ret = cs_hsi_write_on_control(hi, cmd);
		break;
	case TARGET_LOCAL:
		if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY)
			ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	local_bh_enable();

	return ret;
}

static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state)
{
	int change = 0;

	spin_lock_bh(&hi->lock);
	if (hi->wakeline_state != new_state) {
		hi->wakeline_state = new_state;
		change = 1;
		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
			new_state, hi->cl);
	}
	spin_unlock_bh(&hi->lock);

	if (change) {
		if (new_state)
			ssip_slave_start_tx(hi->master);
		else
			ssip_slave_stop_tx(hi->master);
	}

	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
		new_state, hi->cl);
}

static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
{
	hi->rx_bufs = rx_bufs;
	hi->tx_bufs = tx_bufs;
	hi->mmap_cfg->rx_bufs = rx_bufs;
	hi->mmap_cfg->tx_bufs = tx_bufs;

	if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
		/*
		 * For more robust overrun detection, let the rx
		 * pointer run in range 0..'boundary-1'. Boundary
		 * is a multiple of rx_bufs, and limited in max size
		 * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff
		 * calculation.
		 */
		hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
		hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
	} else {
		hi->rx_ptr_boundary = hi->rx_bufs;
	}
}

static int check_buf_params(struct cs_hsi_iface *hi,
				const struct cs_buffer_config *buf_cfg)
{
	size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
					(buf_cfg->rx_bufs + buf_cfg->tx_bufs);
	size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	int r = 0;

	if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
			buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
		r = -EINVAL;
	} else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
		dev_err(&hi->cl->device, "No space for the requested buffer "
			"configuration\n");
		r = -ENOBUFS;
	}

	return r;
}

/**
 * Block until pending data transfers have completed.
 */
static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
{
	int r = 0;

	spin_lock_bh(&hi->lock);

	if (!cs_state_xfer_active(hi->data_state)) {
		dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
		goto out;
	}

	for (;;) {
		int s;
		DEFINE_WAIT(wait);

		if (!cs_state_xfer_active(hi->data_state))
			goto out;
		if (signal_pending(current)) {
			r = -ERESTARTSYS;
			goto out;
		}
		/*
		 * prepare_to_wait must be called with hi->lock held
		 * so that callbacks can check for waitqueue_active()
		 */
		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_bh(&hi->lock);
		s = schedule_timeout(
			msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS));
		spin_lock_bh(&hi->lock);
		finish_wait(&hi->datawait, &wait);
		if (!s) {
			dev_dbg(&hi->cl->device,
				"hsi_data_sync timeout after %d ms\n",
				CS_HSI_TRANSFER_TIMEOUT_MS);
			r = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_bh(&hi->lock);
	dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);

	return r;
}

static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
				struct cs_buffer_config *buf_cfg)
{
	unsigned int data_start, i;

	BUG_ON(hi->buf_size == 0);

	set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);

	hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
	dev_dbg(&hi->cl->device,
		"setting slot size to %u, buf size %u, align %u\n",
		hi->slot_size, hi->buf_size, L1_CACHE_BYTES);

	data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	dev_dbg(&hi->cl->device,
		"setting data start at %u, cfg block %zu, align %u\n",
		data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);

	for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
		hi->rx_offsets[i] = data_start + i * hi->slot_size;
		hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
		dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
			i, hi->rx_offsets[i]);
	}
	for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
		hi->tx_offsets[i] = data_start +
			(i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
		hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
		dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
			i, hi->tx_offsets[i]);
	}

	hi->iface_state = CS_STATE_CONFIGURED;
}

static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state)
{
	if (old_state == CS_STATE_CONFIGURED) {
		dev_dbg(&hi->cl->device,
			"closing data channel with slot size 0\n");
		hi->iface_state = CS_STATE_OPENED;
	}
}
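
/*
 * Apply a new buffer configuration from CS_CONFIG_BUFS: quiesce the data
 * channel, wait for in-flight transfers, recompute the mmap layout and,
 * when entering the configured state, add the PM QoS latency request and
 * re-arm downlink reads.
 */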
static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
				struct cs_buffer_config *buf_cfg)
{
	int r = 0;
	unsigned int old_state = hi->iface_state;

	spin_lock_bh(&hi->lock);
	/* Prevent new transactions during buffer reconfig */
	if (old_state == CS_STATE_CONFIGURED)
		hi->iface_state = CS_STATE_OPENED;
	spin_unlock_bh(&hi->lock);

	/*
	 * make sure that no non-zero data reads are ongoing before
	 * proceeding to change the buffer layout
	 */
	r = cs_hsi_data_sync(hi);
	if (r < 0)
		return r;

	WARN_ON(cs_state_xfer_active(hi->data_state));

	spin_lock_bh(&hi->lock);
	r = check_buf_params(hi, buf_cfg);
	if (r < 0)
		goto error;

	hi->buf_size = buf_cfg->buf_size;
	hi->mmap_cfg->buf_size = hi->buf_size;
	hi->flags = buf_cfg->flags;
	hi->rx_slot = 0;
	hi->tx_slot = 0;
	hi->slot_size = 0;

	if (hi->buf_size)
		cs_hsi_data_enable(hi, buf_cfg);
	else
		cs_hsi_data_disable(hi, old_state);

	spin_unlock_bh(&hi->lock);

	if (old_state != hi->iface_state) {
		if (hi->iface_state == CS_STATE_CONFIGURED) {
			pm_qos_add_request(&hi->pm_qos_req,
				PM_QOS_CPU_DMA_LATENCY,
				CS_QOS_LATENCY_FOR_DATA_USEC);
			local_bh_disable();
			cs_hsi_read_on_data(hi);
			local_bh_enable();
		} else if (old_state == CS_STATE_CONFIGURED) {
			pm_qos_remove_request(&hi->pm_qos_req);
		}
	}

	return r;

error:
	spin_unlock_bh(&hi->lock);
	return r;
}

static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
			unsigned long mmap_base, unsigned long mmap_size)
{
	int err = 0;
	struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL);

	dev_dbg(&cl->device, "cs_hsi_start\n");

	if (!hsi_if) {
		err = -ENOMEM;
		goto leave0;
	}
	spin_lock_init(&hsi_if->lock);
	hsi_if->cl = cl;
	hsi_if->iface_state = CS_STATE_CLOSED;
	hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
	hsi_if->mmap_base = mmap_base;
	hsi_if->mmap_size = mmap_size;
	memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
	init_waitqueue_head(&hsi_if->datawait);
	err = cs_alloc_cmds(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages\n");
		goto leave1;
	}
	err = cs_hsi_alloc_data(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
		goto leave2;
	}
	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device,
			"Could not open, HSI port already claimed\n");
		goto leave3;
	}
	hsi_if->master = ssip_slave_get_master(cl);
	if (IS_ERR(hsi_if->master)) {
		err = PTR_ERR(hsi_if->master);
		dev_err(&cl->device, "Could not get HSI master client\n");
		goto leave4;
	}
	if (!ssip_slave_running(hsi_if->master)) {
		err = -ENODEV;
		dev_err(&cl->device,
			"HSI port not initialized\n");
		goto leave4;
	}

	hsi_if->iface_state = CS_STATE_OPENED;
	local_bh_disable();
	cs_hsi_read_on_control(hsi_if);
	local_bh_enable();

	dev_dbg(&cl->device, "cs_hsi_start...done\n");

	BUG_ON(!hi);
	*hi = hsi_if;

	return 0;

leave4:
	hsi_release_port(cl);
leave3:
	cs_hsi_free_data(hsi_if);
leave2:
	cs_free_cmds(hsi_if);
leave1:
	kfree(hsi_if);
leave0:
	dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n");

	return err;
}

static void cs_hsi_stop(struct cs_hsi_iface *hi)
{
	dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
	cs_hsi_set_wakeline(hi, 0);
	ssip_slave_put_master(hi->master);

	/* hsi_release_port() needs to be called with CS_STATE_CLOSED */
	hi->iface_state = CS_STATE_CLOSED;
	hsi_release_port(hi->cl);

	/*
	 * hsi_release_port() should flush out all the pending
	 * messages, so cs_state_idle() should be true for both
	 * control and data channels.
	 */
	WARN_ON(!cs_state_idle(hi->control_state));
	WARN_ON(!cs_state_idle(hi->data_state));

	if (pm_qos_request_active(&hi->pm_qos_req))
		pm_qos_remove_request(&hi->pm_qos_req);

	spin_lock_bh(&hi->lock);
	cs_hsi_free_data(hi);
	cs_free_cmds(hi);
	spin_unlock_bh(&hi->lock);
	kfree(hi);
}
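
/*
 * The shared config/data area is a single zeroed page allocated in
 * cs_char_open(); the fault handler simply maps that page into the
 * caller's VMA.
 */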
static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct cs_char *csdata = vma->vm_private_data;
	struct page *page;

	page = virt_to_page(csdata->mmap_base);
	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct cs_char_vm_ops = {
	.fault	= cs_char_vma_fault,
};

static int cs_char_fasync(int fd, struct file *file, int on)
{
	struct cs_char *csdata = file->private_data;

	if (fasync_helper(fd, file, on, &csdata->async_queue) < 0)
		return -EIO;

	return 0;
}

static unsigned int cs_char_poll(struct file *file, poll_table *wait)
{
	struct cs_char *csdata = file->private_data;
	unsigned int ret = 0;

	poll_wait(file, &cs_char_data.wait, wait);
	spin_lock_bh(&csdata->lock);
	if (!list_empty(&csdata->chardev_queue))
		ret = POLLIN | POLLRDNORM;
	else if (!list_empty(&csdata->dataind_queue))
		ret = POLLIN | POLLRDNORM;
	spin_unlock_bh(&csdata->lock);

	return ret;
}

static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count,
			loff_t *unused)
{
	struct cs_char *csdata = file->private_data;
	u32 data;
	ssize_t retval;

	if (count < sizeof(data))
		return -EINVAL;

	for (;;) {
		DEFINE_WAIT(wait);

		spin_lock_bh(&csdata->lock);
		if (!list_empty(&csdata->chardev_queue)) {
			data = cs_pop_entry(&csdata->chardev_queue);
		} else if (!list_empty(&csdata->dataind_queue)) {
			data = cs_pop_entry(&csdata->dataind_queue);
			csdata->dataind_pending--;
		} else {
			data = 0;
		}
		spin_unlock_bh(&csdata->lock);

		if (data)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		prepare_to_wait_exclusive(&csdata->wait, &wait,
						TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&csdata->wait, &wait);
	}

	retval = put_user(data, (u32 __user *)buf);
	if (!retval)
		retval = sizeof(data);

out:
	return retval;
}

static ssize_t cs_char_write(struct file *file, const char __user *buf,
			size_t count, loff_t *unused)
{
	struct cs_char *csdata = file->private_data;
	u32 data;
	int err;
	ssize_t retval;

	if (count < sizeof(data))
		return -EINVAL;

	if (get_user(data, (u32 __user *)buf))
		retval = -EFAULT;
	else
		retval = count;

	err = cs_hsi_command(csdata->hi, data);
	if (err < 0)
		retval = err;

	return retval;
}
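
/*
 * ioctl interface: query the interface state and version, toggle the HSI
 * wake line and configure the shared buffer layout (commands defined in
 * <linux/hsi/cs-protocol.h>).
 */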
static long cs_char_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct cs_char *csdata = file->private_data;
	int r = 0;

	switch (cmd) {
	case CS_GET_STATE: {
		unsigned int state;

		state = cs_hsi_get_state(csdata->hi);

		if (copy_to_user((void __user *)arg, &state, sizeof(state)))
			r = -EFAULT;

		break;
	}
	case CS_SET_WAKELINE: {
		unsigned int state;

		if (copy_from_user(&state, (void __user *)arg,
					sizeof(state))) {
			r = -EFAULT;
			break;
		}

		if (state > 1) {
			r = -EINVAL;
			break;
		}

		cs_hsi_set_wakeline(csdata->hi, !!state);

		break;
	}
	case CS_GET_IF_VERSION: {
		unsigned int ifver = CS_IF_VERSION;

		if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver)))
			r = -EFAULT;

		break;
	}
	case CS_CONFIG_BUFS: {
		struct cs_buffer_config buf_cfg;

		if (copy_from_user(&buf_cfg, (void __user *)arg,
					sizeof(buf_cfg)))
			r = -EFAULT;
		else
			r = cs_hsi_buf_config(csdata->hi, &buf_cfg);

		break;
	}
	default:
		r = -ENOTTY;
		break;
	}

	return r;
}

static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) != 1)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
	vma->vm_ops = &cs_char_vm_ops;
	vma->vm_private_data = file->private_data;

	return 0;
}

static int cs_char_open(struct inode *unused, struct file *file)
{
	int ret = 0;
	unsigned long p;

	spin_lock_bh(&cs_char_data.lock);
	if (cs_char_data.opened) {
		ret = -EBUSY;
		spin_unlock_bh(&cs_char_data.lock);
		goto out1;
	}
	cs_char_data.opened = 1;
	cs_char_data.dataind_pending = 0;
	spin_unlock_bh(&cs_char_data.lock);

	p = get_zeroed_page(GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out2;
	}

	ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
	if (ret) {
		dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
		goto out3;
	}

	/* these are only used in release so lock not needed */
	cs_char_data.mmap_base = p;
	cs_char_data.mmap_size = CS_MMAP_SIZE;

	file->private_data = &cs_char_data;

	return 0;

out3:
	free_page(p);
out2:
	spin_lock_bh(&cs_char_data.lock);
	cs_char_data.opened = 0;
	spin_unlock_bh(&cs_char_data.lock);
out1:
	return ret;
}

static void cs_free_char_queue(struct list_head *head)
{
	struct char_queue *entry;
	struct list_head *cursor, *next;

	if (!list_empty(head)) {
		list_for_each_safe(cursor, next, head) {
			entry = list_entry(cursor, struct char_queue, list);
			list_del(&entry->list);
			kfree(entry);
		}
	}
}

static int cs_char_release(struct inode *unused, struct file *file)
{
	struct cs_char *csdata = file->private_data;

	cs_hsi_stop(csdata->hi);
	spin_lock_bh(&csdata->lock);
	csdata->hi = NULL;
	free_page(csdata->mmap_base);
	cs_free_char_queue(&csdata->chardev_queue);
	cs_free_char_queue(&csdata->dataind_queue);
	csdata->opened = 0;
	spin_unlock_bh(&csdata->lock);

	return 0;
}

static const struct file_operations cs_char_fops = {
	.owner		= THIS_MODULE,
	.read		= cs_char_read,
	.write		= cs_char_write,
	.poll		= cs_char_poll,
	.unlocked_ioctl	= cs_char_ioctl,
	.mmap		= cs_char_mmap,
	.open		= cs_char_open,
	.release	= cs_char_release,
	.fasync		= cs_char_fasync,
};

static struct miscdevice cs_char_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "cmt_speech",
	.fops	= &cs_char_fops
};
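
/*
 * HSI client probe: resolve the "speech-control" and "speech-data"
 * channel ids from the client description and register the misc device.
 */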
static int cs_hsi_client_probe(struct device *dev)
{
	int err = 0;
	struct hsi_client *cl = to_hsi_client(dev);

	dev_dbg(dev, "hsi_client_probe\n");
	init_waitqueue_head(&cs_char_data.wait);
	spin_lock_init(&cs_char_data.lock);
	cs_char_data.opened = 0;
	cs_char_data.cl = cl;
	cs_char_data.hi = NULL;
	INIT_LIST_HEAD(&cs_char_data.chardev_queue);
	INIT_LIST_HEAD(&cs_char_data.dataind_queue);

	cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl,
		"speech-control");
	if (cs_char_data.channel_id_cmd < 0) {
		err = cs_char_data.channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		return err;
	}

	cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl,
		"speech-data");
	if (cs_char_data.channel_id_data < 0) {
		err = cs_char_data.channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		return err;
	}

	err = misc_register(&cs_char_miscdev);
	if (err)
		dev_err(dev, "Failed to register: %d\n", err);

	return err;
}

static int cs_hsi_client_remove(struct device *dev)
{
	struct cs_hsi_iface *hi;

	dev_dbg(dev, "hsi_client_remove\n");
	misc_deregister(&cs_char_miscdev);

	spin_lock_bh(&cs_char_data.lock);
	hi = cs_char_data.hi;
	cs_char_data.hi = NULL;
	spin_unlock_bh(&cs_char_data.lock);
	if (hi)
		cs_hsi_stop(hi);

	return 0;
}

static struct hsi_client_driver cs_hsi_driver = {
	.driver	= {
		.name	= "cmt-speech",
		.owner	= THIS_MODULE,
		.probe	= cs_hsi_client_probe,
		.remove	= cs_hsi_client_remove,
	},
};

static int __init cs_char_init(void)
{
	pr_info("CMT speech driver added\n");
	return hsi_register_client_driver(&cs_hsi_driver);
}
module_init(cs_char_init);

static void __exit cs_char_exit(void)
{
	hsi_unregister_client_driver(&cs_hsi_driver);
	pr_info("CMT speech driver removed\n");
}
module_exit(cs_char_exit);

MODULE_ALIAS("hsi:cmt-speech");
MODULE_AUTHOR("Kai Vehmanen <kai.vehmanen@nokia.com>");
MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>");
MODULE_DESCRIPTION("CMT speech driver");
MODULE_LICENSE("GPL v2");