/*
 * ssi_protocol.c
 *
 * Implementation of the SSI McSAAB improved protocol.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/if_phonet.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>

void ssi_waketest(struct hsi_client *cl, unsigned int enable);

#define SSIP_TXQUEUE_LEN	100
#define SSIP_MAX_MTU		65535
#define SSIP_DEFAULT_MTU	4000
#define PN_MEDIA_SOS		21
#define SSIP_MIN_PN_HDR		6	/* FIXME: Revisit */
#define SSIP_WDTOUT		2000	/* FIXME: has to be 500 msecs */
#define SSIP_KATOUT		15	/* 15 msecs */
#define SSIP_MAX_CMDS		5 /* Number of pre-allocated commands buffers */
#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
#define SSIP_CMT_LOADER_SYNC	0x11223344
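
/*
 * SSIP_BYTES_TO_FRAMES() rounds a byte count up to whole 32-bit SSI
 * frames, e.g. 10 bytes -> (((10 - 1) >> 2) + 1) = 3 frames.
 */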

/*
 * SSI protocol command definitions
 */
#define SSIP_COMMAND(data)	((data) >> 28)
#define SSIP_PAYLOAD(data)	((data) & 0xfffffff)
/* Commands */
#define SSIP_SW_BREAK		0
#define SSIP_BOOTINFO_REQ	1
#define SSIP_BOOTINFO_RESP	2
#define SSIP_WAKETEST_RESULT	3
#define SSIP_START_TRANS	4
#define SSIP_READY		5
/* Payloads */
#define SSIP_DATA_VERSION(data)	((data) & 0xff)
#define SSIP_LOCAL_VERID	1
#define SSIP_WAKETEST_OK	0
#define SSIP_WAKETEST_FAILED	1
#define SSIP_PDU_LENGTH(data)	(((data) >> 8) & 0xffff)
#define SSIP_MSG_ID(data)	((data) & 0xff)
/* Generic Command */
#define SSIP_CMD(cmd, payload)	(((cmd) << 28) | ((payload) & 0xfffffff))
/* Commands for the control channel */
#define SSIP_BOOTINFO_REQ_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
#define SSIP_BOOTINFO_RESP_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
#define SSIP_START_TRANS_CMD(pdulen, id) \
		SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
#define SSIP_READY_CMD		SSIP_CMD(SSIP_READY, 0)
#define SSIP_SWBREAK_CMD	SSIP_CMD(SSIP_SW_BREAK, 0)
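
/*
 * A control word packs the command id in the top 4 bits and a 28-bit
 * payload, e.g. (illustrative values):
 *
 *	SSIP_START_TRANS_CMD(3, 7)	== 0x40000307
 *	SSIP_COMMAND(0x40000307)	== SSIP_START_TRANS (4)
 *	SSIP_PDU_LENGTH(0x40000307)	== 3 frames
 *	SSIP_MSG_ID(0x40000307)		== 7
 */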

/* Main state machine states */
enum {
	INIT,
	HANDSHAKE,
	ACTIVE,
};

/* Send state machine states */
enum {
	SEND_IDLE,
	WAIT4READY,
	SEND_READY,
	SENDING,
	SENDING_SWBREAK,
};

/* Receive state machine states */
enum {
	RECV_IDLE,
	RECV_READY,
	RECEIVING,
};
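
/*
 * Typical flow once the boot handshake is done (main state ACTIVE):
 *
 * TX: SEND_IDLE -> WAIT4READY (wake line raised) -> SEND_READY (peer
 *     sent READY) -> SENDING (START_TRANS + data) -> SENDING_SWBREAK
 *     (TX queue drained) -> SEND_IDLE or SEND_READY.
 *
 * RX: RECV_IDLE -> RECV_READY (peer raised its wake line, READY sent
 *     back) -> RECEIVING (START_TRANS received) -> RECV_IDLE.
 */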

/**
 * struct ssi_protocol - SSI protocol (McSAAB) data
 * @main_state: Main state machine
 * @send_state: TX state machine
 * @recv_state: RX state machine
 * @waketest: Flag to follow wake line test
 * @rxid: RX data id
 * @txid: TX data id
 * @txqueue_len: TX queue length
 * @tx_wd: TX watchdog
 * @rx_wd: RX watchdog
 * @keep_alive: Workaround for SSI HW bug
 * @lock: To serialize access to this struct
 * @netdev: Phonet network device
 * @txqueue: TX data queue
 * @cmdqueue: Queue of free commands
 * @cl: HSI client own reference
 * @link: Link for ssip_list
 * @tx_usecnt: Refcount to keep track of the slaves that use the wake line
 * @channel_id_cmd: HSI channel id for command stream
 * @channel_id_data: HSI channel id for data stream
 */
struct ssi_protocol {
	unsigned int		main_state;
	unsigned int		send_state;
	unsigned int		recv_state;
	unsigned int		waketest:1;
	u8			rxid;
	u8			txid;
	unsigned int		txqueue_len;
	struct timer_list	tx_wd;
	struct timer_list	rx_wd;
	struct timer_list	keep_alive; /* wake-up workaround */
	spinlock_t		lock;
	struct net_device	*netdev;
	struct list_head	txqueue;
	struct list_head	cmdqueue;
	struct hsi_client	*cl;
	struct list_head	link;
	atomic_t		tx_usecnt;
	int			channel_id_cmd;
	int			channel_id_data;
};

/* List of ssi protocol instances */
static LIST_HEAD(ssip_list);

static void ssip_rxcmd_complete(struct hsi_msg *msg);

static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data;

	data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

static inline u32 ssip_get_cmd(struct hsi_msg *msg)
{
	u32 *data;

	data = sg_virt(msg->sgt.sgl);
	return *data;
}

static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
{
	skb_frag_t *frag;
	struct scatterlist *sg;
	int i;

	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));

	sg = msg->sgt.sgl;
	sg_set_buf(sg, skb->data, skb_headlen(skb));
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg = sg_next(sg);
		BUG_ON(!sg);
		frag = &skb_shinfo(skb)->frags[i];
		sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
	}
}

static void ssip_free_data(struct hsi_msg *msg)
{
	struct sk_buff *skb;

	skb = msg->context;
	pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
								skb);
	msg->destructor = NULL;
	dev_kfree_skb(skb);
	hsi_free_msg(msg);
}

static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
					struct sk_buff *skb, gfp_t flags)
{
	struct hsi_msg *msg;

	msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
	if (!msg)
		return NULL;
	ssip_skb_to_msg(skb, msg);
	msg->destructor = ssip_free_data;
	msg->channel = ssi->channel_id_data;
	msg->context = skb;

	return msg;
}

static inline void ssip_release_cmd(struct hsi_msg *msg)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);

	dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
	spin_lock_bh(&ssi->lock);
	list_add_tail(&msg->link, &ssi->cmdqueue);
	spin_unlock_bh(&ssi->lock);
}

static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	spin_lock_bh(&ssi->lock);
	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	spin_unlock_bh(&ssi->lock);
	msg->destructor = ssip_release_cmd;

	return msg;
}

static void ssip_free_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}

static int ssip_alloc_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	for (i = 0; i < SSIP_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = ssi->channel_id_cmd;
		list_add_tail(&msg->link, &ssi->cmdqueue);
	}

	return 0;

out:
	ssip_free_cmds(ssi);

	return -ENOMEM;
}
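
/*
 * The RX/TX state setters below also drive the timers: a direction
 * (re)arms its watchdog and the shared keep-alive timer while a
 * transfer is pending or in flight (WAIT4READY/SENDING/SENDING_SWBREAK,
 * RECV_READY/RECEIVING) and stops its watchdog when it falls back to an
 * idle or ready state; the keep-alive is only stopped once the other
 * direction is idle as well. As a cmt-speech workaround, RECV_READY
 * does not re-arm the timers while tx_usecnt is non-zero.
 */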

static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->recv_state = state;
	switch (state) {
	case RECV_IDLE:
		del_timer(&ssi->rx_wd);
		if (ssi->send_state == SEND_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case RECV_READY:
		/* CMT speech workaround */
		if (atomic_read(&ssi->tx_usecnt))
			break;
		/* Otherwise fall through */
	case RECEIVING:
		mod_timer(&ssi->keep_alive, jiffies +
						msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}

static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->send_state = state;
	switch (state) {
	case SEND_IDLE:
	case SEND_READY:
		del_timer(&ssi->tx_wd);
		if (ssi->recv_state == RECV_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case WAIT4READY:
	case SENDING:
	case SENDING_SWBREAK:
		mod_timer(&ssi->keep_alive,
				jiffies + msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}

struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
{
	struct hsi_client *master = ERR_PTR(-ENODEV);
	struct ssi_protocol *ssi;

	list_for_each_entry(ssi, &ssip_list, link)
		if (slave->device.parent == ssi->cl->device.parent) {
			master = ssi->cl;
			break;
		}

	return master;
}
EXPORT_SYMBOL_GPL(ssip_slave_get_master);

int ssip_slave_start_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		hsi_start_tx(master);
	}
	spin_unlock_bh(&ssi->lock);
	atomic_inc(&ssi->tx_usecnt);

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_start_tx);

int ssip_slave_stop_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);

	if (atomic_dec_and_test(&ssi->tx_usecnt)) {
		spin_lock_bh(&ssi->lock);
		if ((ssi->send_state == SEND_READY) ||
			(ssi->send_state == WAIT4READY)) {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(master);
		}
		spin_unlock_bh(&ssi->lock);
	}
	dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);

int ssip_slave_running(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	return netif_running(ssi->netdev);
}
EXPORT_SYMBOL_GPL(ssip_slave_running);

static void ssip_reset(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	if (netif_running(ssi->netdev))
		netif_carrier_off(ssi->netdev);
	hsi_flush(cl);
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state != SEND_IDLE)
		hsi_stop_tx(cl);
	if (ssi->waketest)
		ssi_waketest(cl, 0);
	del_timer(&ssi->rx_wd);
	del_timer(&ssi->tx_wd);
	del_timer(&ssi->keep_alive);
	ssi->main_state = 0;
	ssi->send_state = 0;
	ssi->recv_state = 0;
	ssi->waketest = 0;
	ssi->rxid = 0;
	ssi->txid = 0;
	list_for_each_safe(head, tmp, &ssi->txqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		dev_dbg(&cl->device, "Pending TX data\n");
		list_del(head);
		ssip_free_data(msg);
	}
	ssi->txqueue_len = 0;
	spin_unlock_bh(&ssi->lock);
}

static void ssip_dump_state(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	spin_lock_bh(&ssi->lock);
	dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
	dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
	dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
	dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
							"Online" : "Offline");
	dev_err(&cl->device, "Wake test %d\n", ssi->waketest);
	dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
	dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);

	list_for_each_entry(msg, &ssi->txqueue, link)
		dev_err(&cl->device, "pending TX data (%p)\n", msg);
	spin_unlock_bh(&ssi->lock);
}

static void ssip_error(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	ssip_dump_state(cl);
	ssip_reset(cl);
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}
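
/*
 * Keep-alive timer callback (workaround for an SSI HW bug, see
 * @keep_alive in struct ssi_protocol): it keeps re-arming itself every
 * SSIP_KATOUT ms and only lets the timer die once the RX side is idle
 * and the TX side is idle, or ready with no cmt-speech users left.
 */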
static void ssip_keep_alive(unsigned long data)
{
	struct hsi_client *cl = (struct hsi_client *)data;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
		ssi->main_state, ssi->recv_state, ssi->send_state);

	spin_lock(&ssi->lock);
	if (ssi->recv_state == RECV_IDLE)
		switch (ssi->send_state) {
		case SEND_READY:
			if (atomic_read(&ssi->tx_usecnt) == 0)
				break;
			/*
			 * Fall through. Workaround for cmt-speech:
			 * in that case we rely on the audio timers.
			 */
		case SEND_IDLE:
			spin_unlock(&ssi->lock);
			return;
		}
	mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
	spin_unlock(&ssi->lock);
}

static void ssip_wd(unsigned long data)
{
	struct hsi_client *cl = (struct hsi_client *)data;

	dev_err(&cl->device, "Watchdog triggered\n");
	ssip_error(cl);
}

static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
	msg->complete = ssip_release_cmd;
	hsi_async_write(cl, msg);
	dev_dbg(&cl->device, "Issuing RX command\n");
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}

static void ssip_start_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
							ssi->recv_state);
	spin_lock(&ssi->lock);
	/*
	 * We can have two UP events in a row due to a short low-high
	 * transition. Therefore we need to ignore the second UP event.
	 */
	if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
		if (ssi->main_state == INIT) {
			ssi->main_state = HANDSHAKE;
			spin_unlock(&ssi->lock);
			ssip_send_bootinfo_req_cmd(cl);
		} else {
			spin_unlock(&ssi->lock);
		}
		return;
	}
	ssip_set_rxstate(ssi, RECV_READY);
	spin_unlock(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_READY_CMD);
	msg->complete = ssip_release_cmd;
	dev_dbg(&cl->device, "Send READY\n");
	hsi_async_write(cl, msg);
}

static void ssip_stop_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
	spin_lock(&ssi->lock);
	if (likely(ssi->main_state == ACTIVE))
		ssip_set_rxstate(ssi, RECV_IDLE);
	spin_unlock(&ssi->lock);
}

static void ssip_free_strans(struct hsi_msg *msg)
{
	ssip_free_data(msg->context);
	ssip_release_cmd(msg);
}

static void ssip_strans_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *data;

	data = msg->context;
	ssip_release_cmd(msg);
	spin_lock(&ssi->lock);
	ssip_set_txstate(ssi, SENDING);
	spin_unlock(&ssi->lock);
	hsi_async_write(cl, data);
}
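
/*
 * Dequeue the next pending skb and announce it to the peer with a
 * START_TRANS command carrying the frame count and a running message
 * id; the data frames themselves are written from the command's
 * completion callback (ssip_strans_complete).
 */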
static int ssip_xmit(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg, *dmsg;
	struct sk_buff *skb;

	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		spin_unlock_bh(&ssi->lock);
		return 0;
	}
	dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
	list_del(&dmsg->link);
	ssi->txqueue_len--;
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	skb = dmsg->context;
	msg->context = dmsg;
	msg->complete = ssip_strans_complete;
	msg->destructor = ssip_free_strans;

	spin_lock_bh(&ssi->lock);
	ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
								ssi->txid));
	ssi->txid++;
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);

	dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
						SSIP_BYTES_TO_FRAMES(skb->len));

	return hsi_async_write(cl, msg);
}
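
/*
 * Hand a received PDU to the Phonet stack: the 16-bit length field of
 * the Phonet header is byte-swapped to the order the Phonet stack
 * expects (the TX path does the inverse swap), the one-byte hard
 * header is pulled off and the skb is passed to netif_rx().
 */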
/* In soft IRQ context */
static void ssip_pn_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (unlikely(!netif_running(dev))) {
		dev_dbg(&dev->dev, "Drop RX packet\n");
		dev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return;
	}
	if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
		dev_dbg(&dev->dev, "Error drop RX packet\n");
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
	dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
			((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));

	skb->protocol = htons(ETH_P_PHONET);
	skb_reset_mac_header(skb);
	__skb_pull(skb, 1);
	netif_rx(skb);
}

static void ssip_rx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX data error\n");
		ssip_free_data(msg);
		ssip_error(cl);
		return;
	}
	del_timer(&ssi->rx_wd); /* FIXME: Revisit */
	skb = msg->context;
	ssip_pn_rx(skb);
	hsi_free_msg(msg);
}

static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	/* Workaround: Ignore CMT Loader message leftover */
	if (cmd == SSIP_CMT_LOADER_SYNC)
		return;

	switch (ssi->main_state) {
	case ACTIVE:
		dev_err(&cl->device, "Boot info req on active state\n");
		ssip_error(cl);
		/* Fall through */
	case INIT:
		spin_lock(&ssi->lock);
		ssi->main_state = HANDSHAKE;
		if (!ssi->waketest) {
			ssi->waketest = 1;
			ssi_waketest(cl, 1); /* FIXME: To be removed */
		}
		/* Start boot handshake watchdog */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		spin_unlock(&ssi->lock);
		dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
		if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
			dev_warn(&cl->device, "boot info req verid mismatch\n");
		msg = ssip_claim_cmd(ssi);
		ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
		msg->complete = ssip_release_cmd;
		hsi_async_write(cl, msg);
		break;
	case HANDSHAKE:
		/* Ignore */
		break;
	default:
		dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
		break;
	}
}

static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
		dev_warn(&cl->device, "boot info resp verid mismatch\n");

	spin_lock(&ssi->lock);
	if (ssi->main_state != ACTIVE)
		/* Use tx_wd as a boot watchdog in non ACTIVE state */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
	else
		dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
							ssi->main_state);
	spin_unlock(&ssi->lock);
}

static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	unsigned int wkres = SSIP_PAYLOAD(cmd);

	spin_lock(&ssi->lock);
	if (ssi->main_state != HANDSHAKE) {
		dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
							ssi->main_state);
		spin_unlock(&ssi->lock);
		return;
	}
	if (ssi->waketest) {
		ssi->waketest = 0;
		ssi_waketest(cl, 0); /* FIXME: To be removed */
	}
	ssi->main_state = ACTIVE;
	del_timer(&ssi->tx_wd); /* Stop boot handshake timer */
	spin_unlock(&ssi->lock);

	dev_notice(&cl->device, "WAKELINES TEST %s\n",
				wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
	if (wkres & SSIP_WAKETEST_FAILED) {
		ssip_error(cl);
		return;
	}
	dev_dbg(&cl->device, "CMT is ONLINE\n");
	netif_wake_queue(ssi->netdev);
	netif_carrier_on(ssi->netdev);
}

static void ssip_rx_ready(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	spin_lock(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock(&ssi->lock);
		return;
	}
	if (ssi->send_state != WAIT4READY) {
		dev_dbg(&cl->device, "Ignore spurious READY command\n");
		spin_unlock(&ssi->lock);
		return;
	}
	ssip_set_txstate(ssi, SEND_READY);
	spin_unlock(&ssi->lock);
	ssip_xmit(cl);
}

static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;
	struct hsi_msg *msg;
	int len = SSIP_PDU_LENGTH(cmd);

	dev_dbg(&cl->device, "RX strans: %d frames\n", len);
	spin_lock(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECEIVING);
	if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
		dev_err(&cl->device, "START TRANS id %d expected %d\n",
					SSIP_MSG_ID(cmd), ssi->rxid);
		spin_unlock(&ssi->lock);
		goto out1;
	}
	ssi->rxid++;
	spin_unlock(&ssi->lock);
	skb = netdev_alloc_skb(ssi->netdev, len * 4);
	if (unlikely(!skb)) {
		dev_err(&cl->device, "No memory for rx skb\n");
		goto out1;
	}
	skb->dev = ssi->netdev;
	skb_put(skb, len * 4);
	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (unlikely(!msg)) {
		dev_err(&cl->device, "No memory for RX data msg\n");
		goto out2;
	}
	msg->complete = ssip_rx_data_complete;
	hsi_async_read(cl, msg);

	return;
out2:
	dev_kfree_skb(skb);
out1:
	ssip_error(cl);
}
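
/*
 * Completion handler for reads on the control channel: on success the
 * read is resubmitted and the received word is dispatched on its
 * SSIP_COMMAND() id.
 */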
static void ssip_rxcmd_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	u32 cmd = ssip_get_cmd(msg);
	unsigned int cmdid = SSIP_COMMAND(cmd);

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX error detected\n");
		ssip_release_cmd(msg);
		ssip_error(cl);
		return;
	}
	hsi_async_read(cl, msg);
	dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
	switch (cmdid) {
	case SSIP_SW_BREAK:
		/* Ignored */
		break;
	case SSIP_BOOTINFO_REQ:
		ssip_rx_bootinforeq(cl, cmd);
		break;
	case SSIP_BOOTINFO_RESP:
		ssip_rx_bootinforesp(cl, cmd);
		break;
	case SSIP_WAKETEST_RESULT:
		ssip_rx_waketest(cl, cmd);
		break;
	case SSIP_START_TRANS:
		ssip_rx_strans(cl, cmd);
		break;
	case SSIP_READY:
		ssip_rx_ready(cl);
		break;
	default:
		dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
		break;
	}
}

static void ssip_swbreak_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	ssip_release_cmd(msg);
	spin_lock(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		if (atomic_read(&ssi->tx_usecnt)) {
			ssip_set_txstate(ssi, SEND_READY);
		} else {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(cl);
		}
		spin_unlock(&ssi->lock);
	} else {
		spin_unlock(&ssi->lock);
		ssip_xmit(cl);
	}
	netif_wake_queue(ssi->netdev);
}

static void ssip_tx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *cmsg;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "TX data error\n");
		ssip_error(cl);
		goto out;
	}
	spin_lock(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		ssip_set_txstate(ssi, SENDING_SWBREAK);
		spin_unlock(&ssi->lock);
		cmsg = ssip_claim_cmd(ssi);
		ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
		cmsg->complete = ssip_swbreak_complete;
		dev_dbg(&cl->device, "Send SWBREAK\n");
		hsi_async_write(cl, cmsg);
	} else {
		spin_unlock(&ssi->lock);
		ssip_xmit(cl);
	}
out:
	ssip_free_data(msg);
}

static void ssip_port_event(struct hsi_client *cl, unsigned long event)
{
	switch (event) {
	case HSI_EVENT_START_RX:
		ssip_start_rx(cl);
		break;
	case HSI_EVENT_STOP_RX:
		ssip_stop_rx(cl);
		break;
	default:
		return;
	}
}

static int ssip_pn_open(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	int err;

	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device, "SSI port already claimed\n");
		return err;
	}
	err = hsi_register_port_event(cl, ssip_port_event);
	if (err < 0) {
		dev_err(&cl->device, "Register HSI port event failed (%d)\n",
			err);
		return err;
	}
	dev_dbg(&cl->device, "Configuring SSI port\n");
	hsi_setup(cl);
	spin_lock_bh(&ssi->lock);
	if (!ssi->waketest) {
		ssi->waketest = 1;
		ssi_waketest(cl, 1); /* FIXME: To be removed */
	}
	ssi->main_state = INIT;
	spin_unlock_bh(&ssi->lock);

	return 0;
}

static int ssip_pn_stop(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);

	ssip_reset(cl);
	hsi_unregister_port_event(cl);
	hsi_release_port(cl);

	return 0;
}

static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;

	return 0;
}
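
/*
 * Phonet TX entry point: the skb is padded to a multiple of 32 bits,
 * the Phonet length field is byte-swapped for the modem, and the
 * resulting HSI data message is queued. If the link was idle, the wake
 * line is raised (WAIT4READY); if the peer already sent READY, the
 * transfer is kicked off directly via ssip_xmit().
 */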
static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	if ((skb->protocol != htons(ETH_P_PHONET)) ||
					(skb->len < SSIP_MIN_PN_HDR))
		goto drop;
	/* Pad to 32-bits - FIXME: Revisit */
	if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
		goto inc_dropped;

	/*
	 * Modem sends Phonet messages over SSI with its own endianness.
	 * Assume that the modem has the same endianness as we do.
	 */
	if (skb_cow_head(skb, 0))
		goto drop;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);

	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (!msg) {
		dev_dbg(&cl->device, "Dropping tx data: No memory\n");
		goto drop;
	}
	msg->complete = ssip_tx_data_complete;

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
		goto drop2;
	}
	list_add_tail(&msg->link, &ssi->txqueue);
	ssi->txqueue_len++;
	if (dev->tx_queue_len < ssi->txqueue_len) {
		dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
		netif_stop_queue(dev);
	}
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
		hsi_start_tx(cl);
	} else if (ssi->send_state == SEND_READY) {
		/* Needed for cmt-speech workaround */
		dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
							ssi->txqueue_len);
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	} else {
		spin_unlock_bh(&ssi->lock);
	}
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	return 0;
drop2:
	hsi_free_msg(msg);
drop:
	dev_kfree_skb(skb);
inc_dropped:
	dev->stats.tx_dropped++;

	return 0;
}

/* CMT reset event handler */
void ssip_reset_event(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_err(&ssi->cl->device, "CMT reset detected!\n");
	ssip_error(ssi->cl);
}
EXPORT_SYMBOL_GPL(ssip_reset_event);

static const struct net_device_ops ssip_pn_ops = {
	.ndo_open	= ssip_pn_open,
	.ndo_stop	= ssip_pn_stop,
	.ndo_start_xmit	= ssip_pn_xmit,
	.ndo_change_mtu	= ssip_pn_set_mtu,
};

static void ssip_pn_setup(struct net_device *dev)
{
	dev->features		= 0;
	dev->netdev_ops		= &ssip_pn_ops;
	dev->type		= ARPHRD_PHONET;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= SSIP_DEFAULT_MTU;
	dev->hard_header_len	= 1;
	dev->dev_addr[0]	= PN_MEDIA_SOS;
	dev->addr_len		= 1;
	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;

	dev->destructor		= free_netdev;
	dev->header_ops		= &phonet_header_ops;
}

static int ssi_protocol_probe(struct device *dev)
{
	static const char ifname[] = "phonet%d";
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi;
	int err;

	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
	if (!ssi) {
		dev_err(dev, "No memory for ssi protocol\n");
		return -ENOMEM;
	}

	spin_lock_init(&ssi->lock);
	init_timer_deferrable(&ssi->rx_wd);
	init_timer_deferrable(&ssi->tx_wd);
	init_timer(&ssi->keep_alive);
	ssi->rx_wd.data = (unsigned long)cl;
	ssi->rx_wd.function = ssip_wd;
	ssi->tx_wd.data = (unsigned long)cl;
	ssi->tx_wd.function = ssip_wd;
	ssi->keep_alive.data = (unsigned long)cl;
	ssi->keep_alive.function = ssip_keep_alive;
	INIT_LIST_HEAD(&ssi->txqueue);
	INIT_LIST_HEAD(&ssi->cmdqueue);
	atomic_set(&ssi->tx_usecnt, 0);
	hsi_client_set_drvdata(cl, ssi);
	ssi->cl = cl;

	ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
	if (ssi->channel_id_cmd < 0) {
		err = ssi->channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		goto out;
	}

	ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
	if (ssi->channel_id_data < 0) {
		err = ssi->channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		goto out;
	}

	err = ssip_alloc_cmds(ssi);
	if (err < 0) {
		dev_err(dev, "No memory for commands\n");
		goto out;
	}

	ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup);
	if (!ssi->netdev) {
		dev_err(dev, "No memory for netdev\n");
		err = -ENOMEM;
		goto out1;
	}

	SET_NETDEV_DEV(ssi->netdev, dev);
	netif_carrier_off(ssi->netdev);
	err = register_netdev(ssi->netdev);
	if (err < 0) {
		dev_err(dev, "Register netdev failed (%d)\n", err);
		goto out2;
	}

	list_add(&ssi->link, &ssip_list);

	dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n",
		ssi->channel_id_cmd, ssi->channel_id_data);

	return 0;
out2:
	free_netdev(ssi->netdev);
out1:
	ssip_free_cmds(ssi);
out:
	kfree(ssi);

	return err;
}

static int ssi_protocol_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	list_del(&ssi->link);
	unregister_netdev(ssi->netdev);
	ssip_free_cmds(ssi);
	hsi_client_set_drvdata(cl, NULL);
	kfree(ssi);

	return 0;
}

static struct hsi_client_driver ssip_driver = {
	.driver = {
		.name	= "ssi-protocol",
		.owner	= THIS_MODULE,
		.probe	= ssi_protocol_probe,
		.remove	= ssi_protocol_remove,
	},
};

static int __init ssip_init(void)
{
	pr_info("SSI protocol aka McSAAB added\n");

	return hsi_register_client_driver(&ssip_driver);
}
module_init(ssip_init);

static void __exit ssip_exit(void)
{
	hsi_unregister_client_driver(&ssip_driver);
	pr_info("SSI protocol driver removed\n");
}
module_exit(ssip_exit);

MODULE_ALIAS("hsi:ssi-protocol");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>");
MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
MODULE_LICENSE("GPL");