omap_ssi_port.c

/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/of_gpio.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
        return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
        return 0;
}

/* Read the current state of the incoming CAWAKE line */
static inline unsigned int ssi_wakein(struct hsi_port *port)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        return gpio_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        debugfs_remove_recursive(omap_port->dir);
}

static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
        struct hsi_port *port = m->private;
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        void __iomem *base = omap_ssi->sys;
        unsigned int ch;

        pm_runtime_get_sync(omap_port->pdev);
        if (omap_port->wake_irq > 0)
                seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
        seq_printf(m, "WAKE\t\t: 0x%08x\n",
                readl(base + SSI_WAKE_REG(port->num)));
        seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
                readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
        seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
                readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
        /* SST */
        base = omap_port->sst_base;
        seq_puts(m, "\nSST\n===\n");
        seq_printf(m, "ID SST\t\t: 0x%08x\n",
                readl(base + SSI_SST_ID_REG));
        seq_printf(m, "MODE\t\t: 0x%08x\n",
                readl(base + SSI_SST_MODE_REG));
        seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
                readl(base + SSI_SST_FRAMESIZE_REG));
        seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
                readl(base + SSI_SST_DIVISOR_REG));
        seq_printf(m, "CHANNELS\t: 0x%08x\n",
                readl(base + SSI_SST_CHANNELS_REG));
        seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
                readl(base + SSI_SST_ARBMODE_REG));
        seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
                readl(base + SSI_SST_TXSTATE_REG));
        seq_printf(m, "BUFSTATE\t: 0x%08x\n",
                readl(base + SSI_SST_BUFSTATE_REG));
        seq_printf(m, "BREAK\t\t: 0x%08x\n",
                readl(base + SSI_SST_BREAK_REG));
        for (ch = 0; ch < omap_port->channels; ch++) {
                seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
                        readl(base + SSI_SST_BUFFER_CH_REG(ch)));
        }
        /* SSR */
        base = omap_port->ssr_base;
        seq_puts(m, "\nSSR\n===\n");
        seq_printf(m, "ID SSR\t\t: 0x%08x\n",
                readl(base + SSI_SSR_ID_REG));
        seq_printf(m, "MODE\t\t: 0x%08x\n",
                readl(base + SSI_SSR_MODE_REG));
        seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
                readl(base + SSI_SSR_FRAMESIZE_REG));
        seq_printf(m, "CHANNELS\t: 0x%08x\n",
                readl(base + SSI_SSR_CHANNELS_REG));
        seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
                readl(base + SSI_SSR_TIMEOUT_REG));
        seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
                readl(base + SSI_SSR_RXSTATE_REG));
        seq_printf(m, "BUFSTATE\t: 0x%08x\n",
                readl(base + SSI_SSR_BUFSTATE_REG));
        seq_printf(m, "BREAK\t\t: 0x%08x\n",
                readl(base + SSI_SSR_BREAK_REG));
        seq_printf(m, "ERROR\t\t: 0x%08x\n",
                readl(base + SSI_SSR_ERROR_REG));
        seq_printf(m, "ERRORACK\t: 0x%08x\n",
                readl(base + SSI_SSR_ERRORACK_REG));
        for (ch = 0; ch < omap_port->channels; ch++) {
                seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
                        readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
        }
        pm_runtime_put_sync(omap_port->pdev);

        return 0;
}

static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
        return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
        .open           = ssi_port_regs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int ssi_div_get(void *data, u64 *val)
{
        struct hsi_port *port = data;
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        pm_runtime_get_sync(omap_port->pdev);
        *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
        pm_runtime_put_sync(omap_port->pdev);

        return 0;
}

static int ssi_div_set(void *data, u64 val)
{
        struct hsi_port *port = data;
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        if (val > 127)
                return -EINVAL;

        pm_runtime_get_sync(omap_port->pdev);
        writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
        omap_port->sst.divisor = val;
        pm_runtime_put_sync(omap_port->pdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
                        struct dentry *dir)
{
        struct hsi_port *port = to_hsi_port(omap_port->dev);

        dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
        if (!dir)
                return -ENOMEM;
        omap_port->dir = dir;
        debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
        dir = debugfs_create_dir("sst", dir);
        if (!dir)
                return -ENOMEM;
        debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
                        &ssi_sst_div_fops);

        return 0;
}
#endif

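/*
 * Find a free GDD logical channel and reserve it for @msg.
 * Returns the channel number on success or -EBUSY if all
 * SSI_MAX_GDD_LCH channels are already in use.
 */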
static int ssi_claim_lch(struct hsi_msg *msg)
{
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        int lch;

        for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
                if (!omap_ssi->gdd_trn[lch].msg) {
                        omap_ssi->gdd_trn[lch].msg = msg;
                        omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
                        return lch;
                }

        return -EBUSY;
}

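/*
 * Map the message buffer for DMA and program GDD logical channel @lch
 * for the transfer. A pm_runtime reference is taken so the clocks stay
 * on for the whole transfer; it is dropped once the transfer completes
 * or is cancelled (see ssi_flush() and ssi_cleanup_gdd()).
 */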
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        void __iomem *gdd = omap_ssi->gdd;
        int err;
        u16 csdp;
        u16 ccr;
        u32 s_addr;
        u32 d_addr;
        u32 tmp;

        if (msg->ttype == HSI_MSG_READ) {
                err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
                                DMA_FROM_DEVICE);
                if (err < 0) {
                        dev_dbg(&ssi->device, "DMA map SG failed!\n");
                        return err;
                }
                csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
                        SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
                        SSI_DATA_TYPE_S32;
                ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
                ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
                        SSI_CCR_ENABLE;
                s_addr = omap_port->ssr_dma +
                        SSI_SSR_BUFFER_CH_REG(msg->channel);
                d_addr = sg_dma_address(msg->sgt.sgl);
        } else {
                err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
                                DMA_TO_DEVICE);
                if (err < 0) {
                        dev_dbg(&ssi->device, "DMA map SG failed!\n");
                        return err;
                }
                csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
                        SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
                        SSI_DATA_TYPE_S32;
                ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
                ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
                        SSI_CCR_ENABLE;
                s_addr = sg_dma_address(msg->sgt.sgl);
                d_addr = omap_port->sst_dma +
                        SSI_SST_BUFFER_CH_REG(msg->channel);
        }
        dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
                lch, csdp, ccr, s_addr, d_addr);

        /* Hold clocks during the transfer */
        pm_runtime_get_sync(omap_port->pdev);

        writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
        writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
        writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
        writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
        writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
                gdd + SSI_GDD_CEN_REG(lch));

        spin_lock_bh(&omap_ssi->lock);
        tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        tmp |= SSI_GDD_LCH(lch);
        writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        spin_unlock_bh(&omap_ssi->lock);
        writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
        msg->status = HSI_STATUS_PROCEEDING;

        return 0;
}

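/*
 * Set up a PIO transfer: enable the DATAACCEPT (write) or DATAAVAILABLE
 * (read) interrupt for the channel and let ssi_pio_complete() move the
 * data one word at a time. Writes keep an extra clock reference until
 * the transfer completes.
 */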
static int ssi_start_pio(struct hsi_msg *msg)
{
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        u32 val;

        pm_runtime_get_sync(omap_port->pdev);
        if (msg->ttype == HSI_MSG_WRITE) {
                val = SSI_DATAACCEPT(msg->channel);
                /* Hold clocks for pio writes */
                pm_runtime_get_sync(omap_port->pdev);
        } else {
                val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
        }
        dev_dbg(&port->device, "Single %s transfer\n",
                msg->ttype ? "write" : "read");
        val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        pm_runtime_put_sync(omap_port->pdev);
        msg->actual_len = 0;
        msg->status = HSI_STATUS_PROCEEDING;

        return 0;
}

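/*
 * Start the first queued transfer on @queue. Messages with a mapped SG
 * list longer than one 32-bit word go through the GDD (DMA) when a
 * logical channel is free; everything else falls back to PIO.
 */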
static int ssi_start_transfer(struct list_head *queue)
{
        struct hsi_msg *msg;
        int lch = -1;

        if (list_empty(queue))
                return 0;
        msg = list_first_entry(queue, struct hsi_msg, link);
        if (msg->status != HSI_STATUS_QUEUED)
                return 0;
        if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
                lch = ssi_claim_lch(msg);
        if (lch >= 0)
                return ssi_start_dma(msg, lch);
        else
                return ssi_start_pio(msg);
}

static int ssi_async_break(struct hsi_msg *msg)
{
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        int err = 0;
        u32 tmp;

        pm_runtime_get_sync(omap_port->pdev);
        if (msg->ttype == HSI_MSG_WRITE) {
                if (omap_port->sst.mode != SSI_MODE_FRAME) {
                        err = -EINVAL;
                        goto out;
                }
                writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
                msg->status = HSI_STATUS_COMPLETED;
                msg->complete(msg);
        } else {
                if (omap_port->ssr.mode != SSI_MODE_FRAME) {
                        err = -EINVAL;
                        goto out;
                }
                spin_lock_bh(&omap_port->lock);
                tmp = readl(omap_ssi->sys +
                        SSI_MPU_ENABLE_REG(port->num, 0));
                writel(tmp | SSI_BREAKDETECTED,
                        omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
                msg->status = HSI_STATUS_PROCEEDING;
                list_add_tail(&msg->link, &omap_port->brkqueue);
                spin_unlock_bh(&omap_port->lock);
        }
out:
        pm_runtime_put_sync(omap_port->pdev);

        return err;
}

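/*
 * hsi_port async callback: queue @msg on the matching TX/RX channel
 * queue and try to start the transfer immediately. Break frames are
 * handled separately by ssi_async_break().
 */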
static int ssi_async(struct hsi_msg *msg)
{
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct list_head *queue;
        int err = 0;

        BUG_ON(!msg);

        if (msg->sgt.nents > 1)
                return -ENOSYS; /* TODO: Add sg support */

        if (msg->break_frame)
                return ssi_async_break(msg);

        if (msg->ttype) {
                BUG_ON(msg->channel >= omap_port->sst.channels);
                queue = &omap_port->txqueue[msg->channel];
        } else {
                BUG_ON(msg->channel >= omap_port->ssr.channels);
                queue = &omap_port->rxqueue[msg->channel];
        }
        msg->status = HSI_STATUS_QUEUED;
        spin_lock_bh(&omap_port->lock);
        list_add_tail(&msg->link, queue);
        err = ssi_start_transfer(queue);
        if (err < 0) {
                list_del(&msg->link);
                msg->status = HSI_STATUS_ERROR;
        }
        spin_unlock_bh(&omap_port->lock);
        dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
                msg->status, msg->ttype, msg->channel);

        return err;
}

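/*
 * Compute the SST divisor for the requested maximum TX speed. The TX
 * bit clock is derived from half the SSI functional clock, so the
 * divisor is (fck / 2 - 1) / max_speed; the decrement makes the
 * division round down on exact multiples.
 */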
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        u32 tx_fckrate = (u32) omap_ssi->fck_rate;

        /* / 2 : SSI TX clock is always half of the SSI functional clock */
        tx_fckrate >>= 1;
        /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
        tx_fckrate--;
        dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
                tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
                omap_ssi->max_speed);

        return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
        struct list_head *node, *tmp;
        struct hsi_msg *msg;

        list_for_each_safe(node, tmp, queue) {
                msg = list_entry(node, struct hsi_msg, link);
                if ((cl) && (cl != msg->cl))
                        continue;
                list_del(node);
                pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
                        msg->channel, msg, msg->sgt.sgl->length,
                        msg->ttype, msg->context);
                if (msg->destructor)
                        msg->destructor(msg);
                else
                        hsi_free_msg(msg);
        }
}

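/*
 * hsi_port setup callback: program the SST/SSR modules with the client
 * TX/RX configuration and mirror the values in the shadow registers
 * used for context restore after OFF mode.
 */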
static int ssi_setup(struct hsi_client *cl)
{
        struct hsi_port *port = to_hsi_port(cl->device.parent);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        void __iomem *sst = omap_port->sst_base;
        void __iomem *ssr = omap_port->ssr_base;
        u32 div;
        u32 val;
        int err = 0;

        pm_runtime_get_sync(omap_port->pdev);
        spin_lock_bh(&omap_port->lock);
        if (cl->tx_cfg.speed)
                omap_ssi->max_speed = cl->tx_cfg.speed;
        div = ssi_calculate_div(ssi);
        if (div > SSI_MAX_DIVISOR) {
                dev_err(&cl->device, "Invalid TX speed %d kb/s (div %d)\n",
                        cl->tx_cfg.speed, div);
                err = -EINVAL;
                goto out;
        }
        /* Set TX/RX module to sleep to stop TX/RX during cfg update */
        writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
        writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
        /* Flush posted write */
        val = readl(ssr + SSI_SSR_MODE_REG);
        /* TX */
        writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
        writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
        writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
        writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
        writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
        /* RX */
        writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
        writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
        writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
        /* Cleanup the break queue if we leave FRAME mode */
        if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
                (cl->rx_cfg.mode != SSI_MODE_FRAME))
                ssi_flush_queue(&omap_port->brkqueue, cl);
        writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
        omap_port->channels = max(cl->rx_cfg.num_hw_channels,
                                  cl->tx_cfg.num_hw_channels);
        /* Shadow registering for OFF mode */
        /* SST */
        omap_port->sst.divisor = div;
        omap_port->sst.frame_size = 31;
        omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
        omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
        omap_port->sst.mode = cl->tx_cfg.mode;
        /* SSR */
        omap_port->ssr.frame_size = 31;
        omap_port->ssr.timeout = 0;
        omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
        omap_port->ssr.mode = cl->rx_cfg.mode;
out:
        spin_unlock_bh(&omap_port->lock);
        pm_runtime_put_sync(omap_port->pdev);

        return err;
}

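/*
 * hsi_port flush callback: abort all ongoing DMA transfers for this
 * port, drain the SST/SSR hardware buffers, ack any pending error or
 * break condition, disable and clear the interrupts, and free every
 * queued message.
 */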
static int ssi_flush(struct hsi_client *cl)
{
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct hsi_msg *msg;
        void __iomem *sst = omap_port->sst_base;
        void __iomem *ssr = omap_port->ssr_base;
        unsigned int i;
        u32 err;

        pm_runtime_get_sync(omap_port->pdev);
        spin_lock_bh(&omap_port->lock);
        /* Stop all DMA transfers */
        for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
                msg = omap_ssi->gdd_trn[i].msg;
                if (!msg || (port != hsi_get_port(msg->cl)))
                        continue;
                writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
                if (msg->ttype == HSI_MSG_READ)
                        pm_runtime_put_sync(omap_port->pdev);
                omap_ssi->gdd_trn[i].msg = NULL;
        }
        /* Flush all SST buffers */
        writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
        writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
        /* Flush all SSR buffers */
        writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
        writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
        /* Flush all errors */
        err = readl(ssr + SSI_SSR_ERROR_REG);
        writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
        /* Flush break */
        writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
        /* Clear interrupts */
        writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        writel_relaxed(0xffffff00,
                omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
        writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
        /* Dequeue all pending requests */
        for (i = 0; i < omap_port->channels; i++) {
                /* Release write clocks */
                if (!list_empty(&omap_port->txqueue[i]))
                        pm_runtime_put_sync(omap_port->pdev);
                ssi_flush_queue(&omap_port->txqueue[i], NULL);
                ssi_flush_queue(&omap_port->rxqueue[i], NULL);
        }
        ssi_flush_queue(&omap_port->brkqueue, NULL);
        spin_unlock_bh(&omap_port->lock);
        pm_runtime_put_sync(omap_port->pdev);

        return 0;
}

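/*
 * The outgoing wake line is reference counted: the first ssi_start_tx()
 * raises WAKE and takes a clock reference, the last ssi_stop_tx()
 * lowers the line and releases it.
 */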
static int ssi_start_tx(struct hsi_client *cl)
{
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

        spin_lock_bh(&omap_port->wk_lock);
        if (omap_port->wk_refcount++) {
                spin_unlock_bh(&omap_port->wk_lock);
                return 0;
        }
        pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
        writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
        spin_unlock_bh(&omap_port->wk_lock);

        return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

        spin_lock_bh(&omap_port->wk_lock);
        BUG_ON(!omap_port->wk_refcount);
        if (--omap_port->wk_refcount) {
                spin_unlock_bh(&omap_port->wk_lock);
                return 0;
        }
        writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
        pm_runtime_put_sync(omap_port->pdev); /* Release clocks */
        spin_unlock_bh(&omap_port->wk_lock);

        return 0;
}

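/*
 * Keep trying to start transfers on @queue until one succeeds or the
 * queue is empty; any message that fails to start is completed with
 * HSI_STATUS_ERROR.
 */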
static void ssi_transfer(struct omap_ssi_port *omap_port,
                        struct list_head *queue)
{
        struct hsi_msg *msg;
        int err = -1;

        spin_lock_bh(&omap_port->lock);
        while (err < 0) {
                err = ssi_start_transfer(queue);
                if (err < 0) {
                        msg = list_first_entry(queue, struct hsi_msg, link);
                        msg->status = HSI_STATUS_ERROR;
                        msg->actual_len = 0;
                        list_del(&msg->link);
                        spin_unlock_bh(&omap_port->lock);
                        msg->complete(msg);
                        spin_lock_bh(&omap_port->lock);
                }
        }
        spin_unlock_bh(&omap_port->lock);
}

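/*
 * Flush a client's queued messages and clear the buffer-state and
 * interrupt-enable bits belonging to the transfers that were already
 * in progress for that client.
 */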
static void ssi_cleanup_queues(struct hsi_client *cl)
{
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct hsi_msg *msg;
        unsigned int i;
        u32 rxbufstate = 0;
        u32 txbufstate = 0;
        u32 status = SSI_ERROROCCURED;
        u32 tmp;

        ssi_flush_queue(&omap_port->brkqueue, cl);
        if (list_empty(&omap_port->brkqueue))
                status |= SSI_BREAKDETECTED;

        for (i = 0; i < omap_port->channels; i++) {
                if (list_empty(&omap_port->txqueue[i]))
                        continue;
                msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
                        link);
                if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
                        txbufstate |= (1 << i);
                        status |= SSI_DATAACCEPT(i);
                        /* Release the clock references for writes, GDD ones included */
                        pm_runtime_put_sync(omap_port->pdev);
                }
                ssi_flush_queue(&omap_port->txqueue[i], cl);
        }
        for (i = 0; i < omap_port->channels; i++) {
                if (list_empty(&omap_port->rxqueue[i]))
                        continue;
                msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
                        link);
                if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
                        rxbufstate |= (1 << i);
                        status |= SSI_DATAAVAILABLE(i);
                }
                ssi_flush_queue(&omap_port->rxqueue[i], cl);
                /* Check if we keep the error detection interrupt armed */
                if (!list_empty(&omap_port->rxqueue[i]))
                        status &= ~SSI_ERROROCCURED;
        }
        /* Cleanup write buffers */
        tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
        tmp &= ~txbufstate;
        writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
        /* Cleanup read buffers */
        tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
        tmp &= ~rxbufstate;
        writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
        /* Disarm and ack pending interrupts */
        tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        tmp &= ~status;
        writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        writel_relaxed(status, omap_ssi->sys +
                SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_msg *msg;
        unsigned int i;
        u32 val = 0;
        u32 tmp;

        for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
                msg = omap_ssi->gdd_trn[i].msg;
                if ((!msg) || (msg->cl != cl))
                        continue;
                writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
                val |= (1 << i);
                /*
                 * Clock references for write will be handled in
                 * ssi_cleanup_queues
                 */
                if (msg->ttype == HSI_MSG_READ)
                        pm_runtime_put_sync(omap_port->pdev);
                omap_ssi->gdd_trn[i].msg = NULL;
        }
        tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        tmp &= ~val;
        writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
        writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
        writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
        /* OCP barrier */
        mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

        return 0;
}

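/*
 * hsi_port release callback: cancel the client's pending DMA requests
 * and queued messages. When the last client releases the port, drop
 * any clock reference still held for the incoming wake line and put
 * the port into SLEEP mode.
 */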
static int ssi_release(struct hsi_client *cl)
{
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

        spin_lock_bh(&omap_port->lock);
        pm_runtime_get_sync(omap_port->pdev);
        /* Stop all the pending DMA requests for that client */
        ssi_cleanup_gdd(ssi, cl);
        /* Now cleanup all the queues */
        ssi_cleanup_queues(cl);
        pm_runtime_put_sync(omap_port->pdev);
        /* If it is the last client of the port, do extra checks and cleanup */
        if (port->claimed <= 1) {
                /*
                 * Drop the clock reference for the incoming wake line
                 * if it is still kept high by the other side.
                 */
                if (omap_port->wkin_cken) {
                        pm_runtime_put_sync(omap_port->pdev);
                        omap_port->wkin_cken = 0;
                }
                pm_runtime_get_sync(omap_port->pdev);
                /* Stop any SSI TX/RX without a client */
                ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
                omap_port->sst.mode = SSI_MODE_SLEEP;
                omap_port->ssr.mode = SSI_MODE_SLEEP;
                pm_runtime_put_sync(omap_port->pdev);
                WARN_ON(omap_port->wk_refcount != 0);
        }
        spin_unlock_bh(&omap_port->lock);

        return 0;
}

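/*
 * Handle an SSR error interrupt: cancel all in-flight reads (GDD and
 * PIO), ack the error condition and complete every pending read
 * request with HSI_STATUS_ERROR before restarting any queued reads.
 */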
static void ssi_error(struct hsi_port *port)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct hsi_msg *msg;
        unsigned int i;
        u32 err;
        u32 val;
        u32 tmp;

        /* ACK error */
        err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
        dev_err(&port->device, "SSI error: 0x%02x\n", err);
        if (!err) {
                dev_dbg(&port->device, "spurious SSI error ignored!\n");
                return;
        }
        spin_lock(&omap_ssi->lock);
        /* Cancel all GDD read transfers */
        for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
                msg = omap_ssi->gdd_trn[i].msg;
                if ((msg) && (msg->ttype == HSI_MSG_READ)) {
                        writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
                        val |= (1 << i);
                        omap_ssi->gdd_trn[i].msg = NULL;
                }
        }
        tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        tmp &= ~val;
        writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        spin_unlock(&omap_ssi->lock);
        /* Cancel all PIO read transfers */
        spin_lock(&omap_port->lock);
        tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
        writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        /* ACK error */
        writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
        writel_relaxed(SSI_ERROROCCURED,
                omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
        /* Signal the error to all current pending read requests */
        for (i = 0; i < omap_port->channels; i++) {
                if (list_empty(&omap_port->rxqueue[i]))
                        continue;
                msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
                        link);
                list_del(&msg->link);
                msg->status = HSI_STATUS_ERROR;
                spin_unlock(&omap_port->lock);
                msg->complete(msg);
                /* Now restart queued reads if any */
                ssi_transfer(omap_port, &omap_port->rxqueue[i]);
                spin_lock(&omap_port->lock);
        }
        spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct hsi_msg *msg;
        struct hsi_msg *tmp;
        u32 val;

        dev_dbg(&port->device, "HWBREAK received\n");

        spin_lock(&omap_port->lock);
        val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        val &= ~SSI_BREAKDETECTED;
        writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
        writel(SSI_BREAKDETECTED,
                omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
        spin_unlock(&omap_port->lock);

        list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
                msg->status = HSI_STATUS_COMPLETED;
                spin_lock(&omap_port->lock);
                list_del(&msg->link);
                spin_unlock(&omap_port->lock);
                msg->complete(msg);
        }
}

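/*
 * Move one 32-bit word per interrupt between the channel buffer
 * register and the message buffer. For writes the interrupt stays
 * armed until the last frame has really left the wire, so the complete
 * callback only runs once the transfer is fully done.
 */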
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_msg *msg;
        u32 *buf;
        u32 reg;
        u32 val;

        spin_lock(&omap_port->lock);
        msg = list_first_entry(queue, struct hsi_msg, link);
        if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
                msg->actual_len = 0;
                msg->status = HSI_STATUS_PENDING;
        }
        if (msg->ttype == HSI_MSG_WRITE)
                val = SSI_DATAACCEPT(msg->channel);
        else
                val = SSI_DATAAVAILABLE(msg->channel);
        if (msg->status == HSI_STATUS_PROCEEDING) {
                buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
                if (msg->ttype == HSI_MSG_WRITE)
                        writel(*buf, omap_port->sst_base +
                                SSI_SST_BUFFER_CH_REG(msg->channel));
                else
                        *buf = readl(omap_port->ssr_base +
                                SSI_SSR_BUFFER_CH_REG(msg->channel));
                dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
                        msg->ttype, *buf);
                msg->actual_len += sizeof(*buf);
                if (msg->actual_len >= msg->sgt.sgl->length)
                        msg->status = HSI_STATUS_COMPLETED;
                /*
                 * Wait for the last written frame to be really sent before
                 * we call the complete callback
                 */
                if ((msg->status == HSI_STATUS_PROCEEDING) ||
                    ((msg->status == HSI_STATUS_COMPLETED) &&
                     (msg->ttype == HSI_MSG_WRITE))) {
                        writel(val, omap_ssi->sys +
                                SSI_MPU_STATUS_REG(port->num, 0));
                        spin_unlock(&omap_port->lock);

                        return;
                }
        }
        /* Transfer completed at this point */
        reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        if (msg->ttype == HSI_MSG_WRITE) {
                /* Release clocks for write transfer */
                pm_runtime_put_sync(omap_port->pdev);
        }
        reg &= ~val;
        writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
        list_del(&msg->link);
        spin_unlock(&omap_port->lock);
        msg->complete(msg);
        ssi_transfer(omap_port, queue);
}

static void ssi_pio_tasklet(unsigned long ssi_port)
{
        struct hsi_port *port = (struct hsi_port *)ssi_port;
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        void __iomem *sys = omap_ssi->sys;
        unsigned int ch;
        u32 status_reg;

        pm_runtime_get_sync(omap_port->pdev);
        status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
        status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

        for (ch = 0; ch < omap_port->channels; ch++) {
                if (status_reg & SSI_DATAACCEPT(ch))
                        ssi_pio_complete(port, &omap_port->txqueue[ch]);
                if (status_reg & SSI_DATAAVAILABLE(ch))
                        ssi_pio_complete(port, &omap_port->rxqueue[ch]);
        }
        if (status_reg & SSI_BREAKDETECTED)
                ssi_break_complete(port);
        if (status_reg & SSI_ERROROCCURED)
                ssi_error(port);

        status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
        status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
        pm_runtime_put_sync(omap_port->pdev);

        if (status_reg)
                tasklet_hi_schedule(&omap_port->pio_tasklet);
        else
                enable_irq(omap_port->irq);
}

static irqreturn_t ssi_pio_isr(int irq, void *port)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        tasklet_hi_schedule(&omap_port->pio_tasklet);
        disable_irq_nosync(irq);

        return IRQ_HANDLED;
}

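/*
 * Track CAWAKE transitions from the wake tasklet. A clock reference is
 * taken while the incoming wake line is high and dropped when it goes
 * low again, and the matching START_RX/STOP_RX events are forwarded to
 * the HSI clients.
 */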
static void ssi_wake_tasklet(unsigned long ssi_port)
{
        struct hsi_port *port = (struct hsi_port *)ssi_port;
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        if (ssi_wakein(port)) {
                /*
                 * We can have a quick High-Low-High transition in the line.
                 * In such a case if we have long interrupt latencies,
                 * we can miss the low event or get twice a high event.
                 * This workaround will avoid breaking the clock reference
                 * count when such a situation occurs.
                 */
                spin_lock(&omap_port->lock);
                if (!omap_port->wkin_cken) {
                        omap_port->wkin_cken = 1;
                        pm_runtime_get_sync(omap_port->pdev);
                }
                spin_unlock(&omap_port->lock);
                dev_dbg(&ssi->device, "Wake in high\n");
                if (omap_port->wktest) { /* FIXME: HACK! To be removed */
                        writel(SSI_WAKE(0),
                                omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
                }
                hsi_event(port, HSI_EVENT_START_RX);
        } else {
                dev_dbg(&ssi->device, "Wake in low\n");
                if (omap_port->wktest) { /* FIXME: HACK! To be removed */
                        writel(SSI_WAKE(0),
                                omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
                }
                hsi_event(port, HSI_EVENT_STOP_RX);
                spin_lock(&omap_port->lock);
                if (omap_port->wkin_cken) {
                        pm_runtime_put_sync(omap_port->pdev);
                        omap_port->wkin_cken = 0;
                }
                spin_unlock(&omap_port->lock);
        }
}

static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);

        tasklet_hi_schedule(&omap_port->wake_tasklet);

        return IRQ_HANDLED;
}

static int __init ssi_port_irq(struct hsi_port *port,
                        struct platform_device *pd)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        int err;

        err = platform_get_irq(pd, 0);
        if (err < 0) {
                dev_err(&port->device, "Port IRQ resource missing\n");
                return err;
        }
        omap_port->irq = err;
        tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
                        (unsigned long)port);
        err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr,
                        0, "mpu_irq0", port);
        if (err < 0)
                dev_err(&port->device, "Request IRQ %d failed (%d)\n",
                        omap_port->irq, err);

        return err;
}

static int __init ssi_wake_irq(struct hsi_port *port,
                        struct platform_device *pd)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        int cawake_irq;
        int err;

        if (omap_port->wake_gpio == -1) {
                omap_port->wake_irq = -1;
                return 0;
        }
        cawake_irq = gpio_to_irq(omap_port->wake_gpio);
        omap_port->wake_irq = cawake_irq;
        tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
                        (unsigned long)port);
        err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr,
                        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                        "cawake", port);
        if (err < 0)
                dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
                        cawake_irq, err);
        err = enable_irq_wake(cawake_irq);
        if (err < 0)
                dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
                        cawake_irq, err);

        return err;
}

static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
{
        unsigned int ch;

        for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
                INIT_LIST_HEAD(&omap_port->txqueue[ch]);
                INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
        }
        INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int __init ssi_port_get_iomem(struct platform_device *pd,
                const char *name, void __iomem **pbase, dma_addr_t *phy)
{
        struct hsi_port *port = platform_get_drvdata(pd);
        struct resource *mem;
        struct resource *ioarea;
        void __iomem *base;

        mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
        if (!mem) {
                dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
                return -ENXIO;
        }
        ioarea = devm_request_mem_region(&port->device, mem->start,
                        resource_size(mem), dev_name(&pd->dev));
        if (!ioarea) {
                dev_err(&pd->dev, "%s IO memory region request failed\n",
                        mem->name);
                return -ENXIO;
        }
        base = devm_ioremap(&port->device, mem->start, resource_size(mem));
        if (!base) {
                dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
                return -ENXIO;
        }
        *pbase = base;
        if (phy)
                *phy = mem->start;

        return 0;
}

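/*
 * Bind a port platform device to the first uninitialized port slot of
 * the parent controller, request the CAWAKE gpio and the TX/RX IO
 * regions, hook up the hsi_port callbacks and enable runtime PM.
 */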
static int __init ssi_port_probe(struct platform_device *pd)
{
        struct device_node *np = pd->dev.of_node;
        struct hsi_port *port;
        struct omap_ssi_port *omap_port;
        struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        int cawake_gpio = 0;
        u32 port_id;
        int err;

        dev_dbg(&pd->dev, "init ssi port...\n");

        if (!try_module_get(ssi->owner)) {
                dev_err(&pd->dev, "could not increment parent module refcount\n");
                return -ENODEV;
        }

        if (!ssi->port || !omap_ssi->port) {
                dev_err(&pd->dev, "ssi controller not initialized!\n");
                err = -ENODEV;
                goto error;
        }

        /* get id of first uninitialized port in controller */
        for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
                port_id++)
                ;

        if (port_id >= ssi->num_ports) {
                dev_err(&pd->dev, "port id out of range!\n");
                err = -ENODEV;
                goto error;
        }

        port = ssi->port[port_id];

        if (!np) {
                dev_err(&pd->dev, "missing device tree data\n");
                err = -EINVAL;
                goto error;
        }

        cawake_gpio = of_get_named_gpio(np, "ti,ssi-cawake-gpio", 0);
        if (cawake_gpio < 0) {
                dev_err(&pd->dev, "DT data is missing cawake gpio (err=%d)\n",
                        cawake_gpio);
                err = -ENODEV;
                goto error;
        }

        err = devm_gpio_request_one(&port->device, cawake_gpio, GPIOF_DIR_IN,
                "cawake");
        if (err) {
                dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n",
                        err);
                err = -ENXIO;
                goto error;
        }

        omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
        if (!omap_port) {
                err = -ENOMEM;
                goto error;
        }
        omap_port->wake_gpio = cawake_gpio;
        omap_port->pdev = &pd->dev;
        omap_port->port_id = port_id;

        /* initialize HSI port */
        port->async     = ssi_async;
        port->setup     = ssi_setup;
        port->flush     = ssi_flush;
        port->start_tx  = ssi_start_tx;
        port->stop_tx   = ssi_stop_tx;
        port->release   = ssi_release;
        hsi_port_set_drvdata(port, omap_port);
        omap_ssi->port[port_id] = omap_port;

        platform_set_drvdata(pd, port);

        err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
                &omap_port->sst_dma);
        if (err < 0)
                goto error;
        err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
                &omap_port->ssr_dma);
        if (err < 0)
                goto error;

        err = ssi_port_irq(port, pd);
        if (err < 0)
                goto error;
        err = ssi_wake_irq(port, pd);
        if (err < 0)
                goto error;

        ssi_queues_init(omap_port);
        spin_lock_init(&omap_port->lock);
        spin_lock_init(&omap_port->wk_lock);
        omap_port->dev = &port->device;

        pm_runtime_irq_safe(omap_port->pdev);
        pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
        err = ssi_debug_add_port(omap_port, omap_ssi->dir);
        if (err < 0) {
                pm_runtime_disable(omap_port->pdev);
                goto error;
        }
#endif

        hsi_add_clients_from_dt(port, np);

        dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n",
                port_id, cawake_gpio);

        return 0;

error:
        /* drop the parent module reference taken above */
        module_put(ssi->owner);

        return err;
}

static int __exit ssi_port_remove(struct platform_device *pd)
{
        struct hsi_port *port = platform_get_drvdata(pd);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
        ssi_debug_remove_port(port);
#endif

        hsi_port_unregister_clients(port);

        tasklet_kill(&omap_port->wake_tasklet);
        tasklet_kill(&omap_port->pio_tasklet);

        port->async     = hsi_dummy_msg;
        port->setup     = hsi_dummy_cl;
        port->flush     = hsi_dummy_cl;
        port->start_tx  = hsi_dummy_cl;
        port->stop_tx   = hsi_dummy_cl;
        port->release   = hsi_dummy_cl;

        omap_ssi->port[omap_port->port_id] = NULL;
        platform_set_drvdata(pd, NULL);
        module_put(ssi->owner);
        pm_runtime_disable(&pd->dev);

        return 0;
}

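/*
 * Runtime PM: the port context saved across OFF mode is limited to the
 * SYS MPU enable register and the SST/SSR configuration. The mode and
 * TX divisor are restored unconditionally on resume; the rest only
 * when a context loss is reported.
 */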
#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
        struct hsi_port *port = to_hsi_port(omap_port->dev);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        omap_port->sys_mpu_enable = readl(omap_ssi->sys +
                SSI_MPU_ENABLE_REG(port->num, 0));

        return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
        struct hsi_port *port = to_hsi_port(omap_port->dev);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        void __iomem *base;

        writel_relaxed(omap_port->sys_mpu_enable,
                omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

        /* SST context */
        base = omap_port->sst_base;
        writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
        writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
        writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

        /* SSR context */
        base = omap_port->ssr_base;
        writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
        writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
        writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

        return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
        u32 mode;

        writel_relaxed(omap_port->sst.mode,
                omap_port->sst_base + SSI_SST_MODE_REG);
        writel_relaxed(omap_port->ssr.mode,
                omap_port->ssr_base + SSI_SSR_MODE_REG);
        /* OCP barrier */
        mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

        return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
        writel_relaxed(omap_port->sst.divisor,
                omap_port->sst_base + SSI_SST_DIVISOR_REG);

        return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
        struct hsi_port *port = dev_get_drvdata(dev);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        dev_dbg(dev, "port runtime suspend!\n");

        ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
        if (omap_ssi->get_loss)
                omap_port->loss_count =
                                omap_ssi->get_loss(ssi->device.parent);
        ssi_save_port_ctx(omap_port);

        return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
        struct hsi_port *port = dev_get_drvdata(dev);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        dev_dbg(dev, "port runtime resume!\n");

        if ((omap_ssi->get_loss) && (omap_port->loss_count ==
                                omap_ssi->get_loss(ssi->device.parent)))
                goto mode; /* We always need to restore the mode & TX divisor */

        ssi_restore_port_ctx(omap_port);

mode:
        ssi_restore_divisor(omap_port);
        ssi_restore_port_mode(omap_port);

        return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
        SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
                omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS      (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS      NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
        { .compatible = "ti,omap3-ssi-port", },
        {},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

static struct platform_driver ssi_port_pdriver = {
        .remove = __exit_p(ssi_port_remove),
        .driver = {
                .name   = "omap_ssi_port",
                .of_match_table = omap_ssi_port_of_match,
                .pm     = DEV_PM_OPS,
        },
};

module_platform_driver_probe(ssi_port_pdriver, ssi_port_probe);

MODULE_ALIAS("platform:omap_ssi_port");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
MODULE_DESCRIPTION("Synchronous Serial Interface Port Driver");
MODULE_LICENSE("GPL v2");