gdm_mux.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687
  1. /*
  2. * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
  3. *
  4. * This software is licensed under the terms of the GNU General Public
  5. * License version 2, as published by the Free Software Foundation, and
  6. * may be copied, distributed, and modified under those terms.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/module.h>
  15. #include <linux/kernel.h>
  16. #include <linux/usb.h>
  17. #include <linux/errno.h>
  18. #include <linux/init.h>
  19. #include <linux/tty.h>
  20. #include <linux/tty_driver.h>
  21. #include <linux/tty_flip.h>
  22. #include <linux/slab.h>
  23. #include <linux/usb/cdc.h>
  24. #include "gdm_mux.h"
/* Workqueue on which deferred RX-to-host processing (do_rx) runs. */
static struct workqueue_struct *mux_rx_wq;

/* MUX packet-type id for each tty index; array position defines the index. */
static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};

/* Match a specific VID/PID exposing a CDC-ACM communication interface. */
#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = \
		USB_DEVICE_ID_MATCH_DEVICE |\
		USB_DEVICE_ID_MATCH_INT_CLASS |\
		USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
	{ USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
	{}
};

MODULE_DEVICE_TABLE(usb, id_table);
  44. static int packet_type_to_index(u16 packetType)
  45. {
  46. int i;
  47. for (i = 0; i < TTY_MAX_COUNT; i++) {
  48. if (packet_type[i] == packetType)
  49. return i;
  50. }
  51. return -1;
  52. }
  53. static struct mux_tx *alloc_mux_tx(int len)
  54. {
  55. struct mux_tx *t = NULL;
  56. t = kzalloc(sizeof(*t), GFP_ATOMIC);
  57. if (!t)
  58. return NULL;
  59. t->urb = usb_alloc_urb(0, GFP_ATOMIC);
  60. t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
  61. if (!t->urb || !t->buf) {
  62. usb_free_urb(t->urb);
  63. kfree(t->buf);
  64. kfree(t);
  65. return NULL;
  66. }
  67. return t;
  68. }
  69. static void free_mux_tx(struct mux_tx *t)
  70. {
  71. if (t) {
  72. usb_free_urb(t->urb);
  73. kfree(t->buf);
  74. kfree(t);
  75. }
  76. }
  77. static struct mux_rx *alloc_mux_rx(void)
  78. {
  79. struct mux_rx *r = NULL;
  80. r = kzalloc(sizeof(*r), GFP_KERNEL);
  81. if (!r)
  82. return NULL;
  83. r->urb = usb_alloc_urb(0, GFP_KERNEL);
  84. r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_KERNEL);
  85. if (!r->urb || !r->buf) {
  86. usb_free_urb(r->urb);
  87. kfree(r->buf);
  88. kfree(r);
  89. return NULL;
  90. }
  91. return r;
  92. }
  93. static void free_mux_rx(struct mux_rx *r)
  94. {
  95. if (r) {
  96. usb_free_urb(r->urb);
  97. kfree(r->buf);
  98. kfree(r);
  99. }
  100. }
  101. static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
  102. {
  103. struct mux_rx *r;
  104. unsigned long flags;
  105. spin_lock_irqsave(&rx->free_list_lock, flags);
  106. if (list_empty(&rx->rx_free_list)) {
  107. spin_unlock_irqrestore(&rx->free_list_lock, flags);
  108. return NULL;
  109. }
  110. r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
  111. list_del(&r->free_list);
  112. spin_unlock_irqrestore(&rx->free_list_lock, flags);
  113. return r;
  114. }
/* Return an RX descriptor to the tail of the free pool. */
static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_add_tail(&r->free_list, &rx->rx_free_list);
	spin_unlock_irqrestore(&rx->free_list_lock, flags);
}
  122. static int up_to_host(struct mux_rx *r)
  123. {
  124. struct mux_dev *mux_dev = r->mux_dev;
  125. struct mux_pkt_header *mux_header;
  126. unsigned int start_flag;
  127. unsigned int payload_size;
  128. unsigned short packet_type;
  129. int total_len;
  130. u32 packet_size_sum = r->offset;
  131. int index;
  132. int ret = TO_HOST_INVALID_PACKET;
  133. int len = r->len;
  134. while (1) {
  135. mux_header = (struct mux_pkt_header *)(r->buf +
  136. packet_size_sum);
  137. start_flag = __le32_to_cpu(mux_header->start_flag);
  138. payload_size = __le32_to_cpu(mux_header->payload_size);
  139. packet_type = __le16_to_cpu(mux_header->packet_type);
  140. if (start_flag != START_FLAG) {
  141. pr_err("invalid START_FLAG %x\n", start_flag);
  142. break;
  143. }
  144. total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
  145. if (len - packet_size_sum <
  146. total_len) {
  147. pr_err("invalid payload : %d %d %04x\n",
  148. payload_size, len, packet_type);
  149. break;
  150. }
  151. index = packet_type_to_index(packet_type);
  152. if (index < 0) {
  153. pr_err("invalid index %d\n", index);
  154. break;
  155. }
  156. ret = r->callback(mux_header->data,
  157. payload_size,
  158. index,
  159. mux_dev->tty_dev,
  160. RECV_PACKET_PROCESS_CONTINUE
  161. );
  162. if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
  163. r->offset += packet_size_sum;
  164. break;
  165. }
  166. packet_size_sum += total_len;
  167. if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
  168. ret = r->callback(NULL,
  169. 0,
  170. index,
  171. mux_dev->tty_dev,
  172. RECV_PACKET_PROCESS_COMPLETE
  173. );
  174. break;
  175. }
  176. }
  177. return ret;
  178. }
/*
 * Work handler: drain rx->to_host_list and push each completed URB's
 * data up to the tty layer via up_to_host().
 */
static void do_rx(struct work_struct *work)
{
	struct mux_dev *mux_dev =
		container_of(work, struct mux_dev, work_rx.work);
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret = 0;

	while (1) {
		/* Pop the oldest queued buffer; stop when the list drains. */
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next, struct mux_rx,
			       to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		ret = up_to_host(r);
		if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
			/* NOTE(review): on this path r is neither requeued
			 * nor returned to the free pool, so the descriptor
			 * appears to leak — confirm this is intentional. */
			pr_err("failed to send mux data to host\n");
		else
			put_rx_struct(rx, r);
	}
}
  204. static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
  205. {
  206. unsigned long flags;
  207. struct mux_rx *r_remove, *r_remove_next;
  208. spin_lock_irqsave(&rx->submit_list_lock, flags);
  209. list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list,
  210. rx_submit_list) {
  211. if (r == r_remove)
  212. list_del(&r->rx_submit_list);
  213. }
  214. spin_unlock_irqrestore(&rx->submit_list_lock, flags);
  215. }
/*
 * Bulk-in URB completion.  On success, queue the filled buffer for
 * do_rx(); on error, return the descriptor to the free pool.
 */
static void gdm_mux_rcv_complete(struct urb *urb)
{
	struct mux_rx *r = urb->context;
	struct mux_dev *mux_dev = r->mux_dev;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;

	/* URB is no longer in flight; drop it from the submit list. */
	remove_rx_submit_list(r, rx);

	if (urb->status) {
		/* Suppress the message outside PM_NORMAL, where URBs are
		 * killed deliberately by suspend/release. */
		if (mux_dev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);
		put_rx_struct(rx, r);
	} else {
		r->len = r->urb->actual_length;
		/* Enqueue and kick the worker under to_host_lock so the
		 * entry is visible before do_rx() scans the list. */
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(mux_rx_wq, &mux_dev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	}
}
/*
 * Issue one bulk-in URB and register @cb to receive the demultiplexed
 * packets.  Returns 0 on success or a negative errno.
 */
static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len,
			int tty_index, struct tty_dev *tty_dev, int complete))
{
	struct mux_dev *mux_dev = priv_dev;
	struct usb_device *usbdev = mux_dev->usbdev;
	struct mux_rx *r;
	struct rx_cxt *rx = &mux_dev->rx;
	unsigned long flags;
	int ret;

	if (!usbdev) {
		pr_err("device is disconnected\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx);
	if (!r) {
		pr_err("get_rx_struct fail\n");
		return -ENOMEM;
	}

	r->offset = 0;
	r->mux_dev = (void *)mux_dev;
	r->callback = cb;
	mux_dev->rx_cb = cb;	/* remembered so resume can re-issue URBs */

	/* 0x86 is the device's fixed bulk-in endpoint address. */
	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x86),
			  r->buf,
			  MUX_RX_MAX_SIZE,
			  gdm_mux_rcv_complete,
			  r);

	/* Track the in-flight URB so suspend/release can kill it. */
	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	ret = usb_submit_urb(r->urb, GFP_KERNEL);
	if (ret) {
		/* Undo the bookkeeping and recycle the descriptor. */
		spin_lock_irqsave(&rx->submit_list_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		put_rx_struct(rx, r);
		pr_err("usb_submit_urb ret=%d\n", ret);
	}

	usb_mark_last_busy(usbdev);

	return ret;
}
  279. static void gdm_mux_send_complete(struct urb *urb)
  280. {
  281. struct mux_tx *t = urb->context;
  282. if (urb->status == -ECONNRESET) {
  283. dev_info(&urb->dev->dev, "CONNRESET\n");
  284. free_mux_tx(t);
  285. return;
  286. }
  287. if (t->callback)
  288. t->callback(t->cb_data);
  289. free_mux_tx(t);
  290. }
  291. static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
  292. void (*cb)(void *data), void *cb_data)
  293. {
  294. struct mux_dev *mux_dev = priv_dev;
  295. struct usb_device *usbdev = mux_dev->usbdev;
  296. struct mux_pkt_header *mux_header;
  297. struct mux_tx *t = NULL;
  298. static u32 seq_num = 1;
  299. int total_len;
  300. int ret;
  301. unsigned long flags;
  302. if (mux_dev->usb_state == PM_SUSPEND) {
  303. ret = usb_autopm_get_interface(mux_dev->intf);
  304. if (!ret)
  305. usb_autopm_put_interface(mux_dev->intf);
  306. }
  307. spin_lock_irqsave(&mux_dev->write_lock, flags);
  308. total_len = ALIGN(MUX_HEADER_SIZE + len, 4);
  309. t = alloc_mux_tx(total_len);
  310. if (!t) {
  311. pr_err("alloc_mux_tx fail\n");
  312. spin_unlock_irqrestore(&mux_dev->write_lock, flags);
  313. return -ENOMEM;
  314. }
  315. mux_header = (struct mux_pkt_header *)t->buf;
  316. mux_header->start_flag = __cpu_to_le32(START_FLAG);
  317. mux_header->seq_num = __cpu_to_le32(seq_num++);
  318. mux_header->payload_size = __cpu_to_le32((u32)len);
  319. mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
  320. memcpy(t->buf + MUX_HEADER_SIZE, data, len);
  321. memset(t->buf + MUX_HEADER_SIZE + len, 0, total_len - MUX_HEADER_SIZE -
  322. len);
  323. t->len = total_len;
  324. t->callback = cb;
  325. t->cb_data = cb_data;
  326. usb_fill_bulk_urb(t->urb,
  327. usbdev,
  328. usb_sndbulkpipe(usbdev, 5),
  329. t->buf,
  330. total_len,
  331. gdm_mux_send_complete,
  332. t);
  333. ret = usb_submit_urb(t->urb, GFP_ATOMIC);
  334. spin_unlock_irqrestore(&mux_dev->write_lock, flags);
  335. if (ret)
  336. pr_err("usb_submit_urb Error: %d\n", ret);
  337. usb_mark_last_busy(usbdev);
  338. return ret;
  339. }
  340. static int gdm_mux_send_control(void *priv_dev, int request, int value,
  341. void *buf, int len)
  342. {
  343. struct mux_dev *mux_dev = priv_dev;
  344. struct usb_device *usbdev = mux_dev->usbdev;
  345. int ret;
  346. ret = usb_control_msg(usbdev,
  347. usb_sndctrlpipe(usbdev, 0),
  348. request,
  349. USB_RT_ACM,
  350. value,
  351. 2,
  352. buf,
  353. len,
  354. 5000
  355. );
  356. if (ret < 0)
  357. pr_err("usb_control_msg error: %d\n", ret);
  358. return ret < 0 ? ret : 0;
  359. }
/*
 * Tear down all RX state: cancel deferred work, kill every in-flight
 * URB, then free the descriptors on the free and to-host lists.
 */
static void release_usb(struct mux_dev *mux_dev)
{
	struct rx_cxt *rx = &mux_dev->rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	cancel_delayed_work(&mux_dev->work_rx);

	/* usb_kill_urb() sleeps, so the lock is dropped around each call.
	 * NOTE(review): while unlocked, the completion handler may unlink
	 * entries — including the cached r_next — so this iteration looks
	 * racy; verify against the completion path. */
	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	spin_lock_irqsave(&rx->free_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
		list_del(&r->free_list);
		free_mux_rx(r);
	}
	spin_unlock_irqrestore(&rx->free_list_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		/* Free only descriptors belonging to this device. */
		if (r->mux_dev == (void *)mux_dev) {
			list_del(&r->to_host_list);
			free_mux_rx(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}
  389. static int init_usb(struct mux_dev *mux_dev)
  390. {
  391. struct mux_rx *r;
  392. struct rx_cxt *rx = &mux_dev->rx;
  393. int ret = 0;
  394. int i;
  395. spin_lock_init(&mux_dev->write_lock);
  396. INIT_LIST_HEAD(&rx->to_host_list);
  397. INIT_LIST_HEAD(&rx->rx_submit_list);
  398. INIT_LIST_HEAD(&rx->rx_free_list);
  399. spin_lock_init(&rx->to_host_lock);
  400. spin_lock_init(&rx->submit_list_lock);
  401. spin_lock_init(&rx->free_list_lock);
  402. for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
  403. r = alloc_mux_rx();
  404. if (!r) {
  405. ret = -ENOMEM;
  406. break;
  407. }
  408. list_add(&r->free_list, &rx->rx_free_list);
  409. }
  410. INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);
  411. return ret;
  412. }
  413. static int gdm_mux_probe(struct usb_interface *intf,
  414. const struct usb_device_id *id)
  415. {
  416. struct mux_dev *mux_dev;
  417. struct tty_dev *tty_dev;
  418. u16 idVendor, idProduct;
  419. int bInterfaceNumber;
  420. int ret;
  421. int i;
  422. struct usb_device *usbdev = interface_to_usbdev(intf);
  423. bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
  424. idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
  425. idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
  426. pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);
  427. if (bInterfaceNumber != 2)
  428. return -ENODEV;
  429. mux_dev = kzalloc(sizeof(*mux_dev), GFP_KERNEL);
  430. if (!mux_dev)
  431. return -ENOMEM;
  432. tty_dev = kzalloc(sizeof(*tty_dev), GFP_KERNEL);
  433. if (!tty_dev) {
  434. ret = -ENOMEM;
  435. goto err_free_mux;
  436. }
  437. mux_dev->usbdev = usbdev;
  438. mux_dev->control_intf = intf;
  439. ret = init_usb(mux_dev);
  440. if (ret)
  441. goto err_free_usb;
  442. tty_dev->priv_dev = (void *)mux_dev;
  443. tty_dev->send_func = gdm_mux_send;
  444. tty_dev->recv_func = gdm_mux_recv;
  445. tty_dev->send_control = gdm_mux_send_control;
  446. ret = register_lte_tty_device(tty_dev, &intf->dev);
  447. if (ret)
  448. goto err_unregister_tty;
  449. for (i = 0; i < TTY_MAX_COUNT; i++)
  450. mux_dev->tty_dev = tty_dev;
  451. mux_dev->intf = intf;
  452. mux_dev->usb_state = PM_NORMAL;
  453. usb_get_dev(usbdev);
  454. usb_set_intfdata(intf, tty_dev);
  455. return 0;
  456. err_unregister_tty:
  457. unregister_lte_tty_device(tty_dev);
  458. err_free_usb:
  459. release_usb(mux_dev);
  460. kfree(tty_dev);
  461. err_free_mux:
  462. kfree(mux_dev);
  463. return ret;
  464. }
  465. static void gdm_mux_disconnect(struct usb_interface *intf)
  466. {
  467. struct tty_dev *tty_dev;
  468. struct mux_dev *mux_dev;
  469. struct usb_device *usbdev = interface_to_usbdev(intf);
  470. tty_dev = usb_get_intfdata(intf);
  471. mux_dev = tty_dev->priv_dev;
  472. release_usb(mux_dev);
  473. unregister_lte_tty_device(tty_dev);
  474. kfree(mux_dev);
  475. kfree(tty_dev);
  476. usb_put_dev(usbdev);
  477. }
/*
 * Suspend: flip the PM state to PM_SUSPEND (which also silences URB
 * error logging in the completion handler) and kill all in-flight RX
 * URBs.  Returns 0, or -1 if called in an unexpected state.
 */
static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct tty_dev *tty_dev;
	struct mux_dev *mux_dev;
	struct rx_cxt *rx;
	struct mux_rx *r, *r_next;
	unsigned long flags;

	tty_dev = usb_get_intfdata(intf);
	mux_dev = tty_dev->priv_dev;
	rx = &mux_dev->rx;

	if (mux_dev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	mux_dev->usb_state = PM_SUSPEND;

	/* usb_kill_urb() sleeps, so the lock is dropped around each call.
	 * NOTE(review): while unlocked, the completion handler may unlink
	 * entries — including the cached r_next — so this iteration looks
	 * racy; verify against the completion path. */
	spin_lock_irqsave(&rx->submit_list_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_list_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_list_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_list_lock, flags);

	return 0;
}
  503. static int gdm_mux_resume(struct usb_interface *intf)
  504. {
  505. struct tty_dev *tty_dev;
  506. struct mux_dev *mux_dev;
  507. u8 i;
  508. tty_dev = usb_get_intfdata(intf);
  509. mux_dev = tty_dev->priv_dev;
  510. if (mux_dev->usb_state != PM_SUSPEND) {
  511. dev_err(intf->usb_dev, "usb resume - invalid state\n");
  512. return -1;
  513. }
  514. mux_dev->usb_state = PM_NORMAL;
  515. for (i = 0; i < MAX_ISSUE_NUM; i++)
  516. gdm_mux_recv(mux_dev, mux_dev->rx_cb);
  517. return 0;
  518. }
/* USB driver glue; reset_resume reuses the normal resume path. */
static struct usb_driver gdm_mux_driver = {
	.name = "gdm_mux",
	.probe = gdm_mux_probe,
	.disconnect = gdm_mux_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_mux_suspend,
	.resume = gdm_mux_resume,
	.reset_resume = gdm_mux_resume,
};
  529. static int __init gdm_usb_mux_init(void)
  530. {
  531. mux_rx_wq = create_workqueue("mux_rx_wq");
  532. if (!mux_rx_wq) {
  533. pr_err("work queue create fail\n");
  534. return -1;
  535. }
  536. register_lte_tty_driver();
  537. return usb_register(&gdm_mux_driver);
  538. }
  539. static void __exit gdm_usb_mux_exit(void)
  540. {
  541. if (mux_rx_wq) {
  542. flush_workqueue(mux_rx_wq);
  543. destroy_workqueue(mux_rx_wq);
  544. }
  545. usb_deregister(&gdm_mux_driver);
  546. unregister_lte_tty_driver();
  547. }
/* Module entry/exit hooks and metadata. */
module_init(gdm_usb_mux_init);
module_exit(gdm_usb_mux_exit);

MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
MODULE_LICENSE("GPL");