/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>

#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"
#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_INT_CLASS | \
		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET

#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_INT_INFO,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
	.bInterfaceProtocol = USB_PR_BULK
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);
static struct workqueue_struct *usb_tx_wq;
static struct workqueue_struct *usb_rx_wq;

static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context);
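
/*
 * Ask the modem for its MAC address: build an HCI LTE_GET_INFORMATION
 * request carrying a single MAC_ADDRESS type byte and push it out the
 * bulk-out endpoint.  The answer arrives asynchronously and is
 * consumed by set_mac_address() on the RX path.
 */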
static int request_mac_address(struct lte_udev *udev)
{
	u8 buf[16] = {0,};
	struct hci_packet *hci = (struct hci_packet *)buf;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret = -1;

	hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
			   &actual, 1000);

	udev->request_mac_addr = 1;

	return ret;
}
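
/*
 * Allocate a TX descriptor plus its URB and data buffer.  The length is
 * bumped by one byte when it is an exact multiple of 512 (the
 * high-speed bulk max-packet size), presumably so the transfer never
 * ends exactly on a packet boundary and no zero-length packet is
 * needed; send_tx_packet() applies the same adjustment.
 */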
static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t = NULL;
	int ret = 0;

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (t) {
			usb_free_urb(t->urb);
			kfree(t->buf);
			kfree(t);
		}
		return NULL;
	}

	return t;
}
static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *t_sdu;

	t_sdu = kzalloc(sizeof(*t_sdu), GFP_KERNEL);
	if (!t_sdu)
		return NULL;

	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
	if (!t_sdu->buf) {
		kfree(t_sdu);
		return NULL;
	}

	return t_sdu;
}
static void free_tx_struct(struct usb_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (t_sdu) {
		kfree(t_sdu->buf);
		kfree(t_sdu);
	}
}
static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ? 1 : 0;

	return t_sdu;
}

static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}
static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = NULL;
	int ret = 0;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (r) {
			usb_free_urb(r->urb);
			kfree(r->buf);
			kfree(r);
		}
		return NULL;
	}

	return r;
}

static void free_rx_struct(struct usb_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}
static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);

	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ? 1 : 0;

	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;

	spin_unlock_irqrestore(&rx->rx_lock, flags);
}
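
/*
 * Tear down everything init_usb() set up: free queued and pooled TX
 * SDUs and HCI packets, kill any in-flight RX URBs (dropping the
 * submit lock around usb_kill_urb(), since the completion handler
 * takes it), then free the RX pool and any RX buffers still waiting
 * to be delivered to the host.
 */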
static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt *rx = &udev->rx;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t, *t_next;
	struct usb_rx *r, *r_next;
	struct usb_tx_sdu *t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}
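
/*
 * Initialize the TX/RX contexts and preallocate the buffer pools:
 * MAX_NUM_SDU_BUF TX SDU descriptors and twice MAX_RX_SUBMIT_COUNT RX
 * descriptors, so the data path never allocates SDU buffers on the
 * fly.  On any failure the partially built pools are released.
 */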
static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (!t_sdu) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	for (i = 0; i < MAX_RX_SUBMIT_COUNT * 2; i++) {
		r = alloc_rx_struct();
		if (!r) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}

	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);

	return 0;

fail:
	release_usb(udev);
	return ret;
}
static int set_mac_address(u8 *data, void *arg)
{
	struct phy_dev *phy_dev = arg;
	struct lte_udev *udev = phy_dev->priv_dev;
	struct tlv *tlv = (struct tlv *)data;
	u8 mac_address[ETH_ALEN] = {0, };

	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
		memcpy(mac_address, tlv->data, tlv->len);

		if (register_lte_device(phy_dev,
					&udev->intf->dev, mac_address) < 0)
			pr_err("register lte device failed\n");

		udev->request_mac_addr = 0;

		return 1;
	}

	return 0;
}
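
/*
 * RX work function: drain rx->to_host_list, handing each completed
 * buffer to the registered callback.  An LTE_GET_INFORMATION_RESULT
 * event is first offered to set_mac_address(); everything else goes
 * straight up the stack.  Each consumed buffer is returned to the
 * pool and a fresh receive URB is submitted in its place.
 */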
static void do_rx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_rx.work);
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	struct hci_packet *hci;
	struct phy_dev *phy_dev;
	u16 cmd_evt;
	int ret;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next,
			       struct usb_rx, to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		phy_dev = r->cb_data;
		udev = phy_dev->priv_dev;
		hci = (struct hci_packet *)r->buf;
		cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);

		switch (cmd_evt) {
		case LTE_GET_INFORMATION_RESULT:
			if (set_mac_address(hci->data, r->cb_data) == 0) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);
			}
			break;

		default:
			if (r->callback) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);

				if (ret == -EAGAIN)
					pr_err("failed to send received data\n");
			}
			break;
		}

		put_rx_struct(rx, r);

		gdm_usb_recv(udev,
			     r->callback,
			     r->cb_data,
			     USB_COMPLETE);
	}
}
static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct usb_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next,
				 &rx->rx_submit_list, rx_submit_list) {
		if (r == r_remove) {
			list_del(&r->rx_submit_list);
			break;
		}
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);
}
static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(usb_rx_wq, &udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		if (urb->status && udev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);

		put_rx_struct(rx, r);
	}

	usb_mark_last_busy(usbdev);
}
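
/*
 * Pull a buffer from the RX pool and submit a bulk-in URB on endpoint
 * 0x83.  The context argument only selects the allocation mode:
 * GFP_KERNEL from kernel-thread context, GFP_ATOMIC when resubmitting
 * from URB completion.  On submit failure the buffer goes back to the
 * pool.
 */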
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}
static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}
static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
{
	int ret = 0;

	if (!(len % 512))
		len++;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 2),
			  t->buf,
			  len,
			  gdm_usb_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);
	if (ret)
		dev_err(&usbdev->dev, "usb_submit_urb failed: %d\n",
			ret);

	usb_mark_last_busy(usbdev);

	return ret;
}
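
/*
 * Coalesce queued SDUs into one LTE_TX_MULTI_SDU frame in send_buf.
 * Packets are copied back to back, each padded to a 4-byte boundary
 * (the "(len + 3) & 0xfffc" rounding), until either
 * MAX_PACKET_IN_MULTI_SDU packets are packed or the next SDU would
 * overflow MAX_SDU_SIZE.  Returns the total frame length including
 * the multi_sdu header.
 */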
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}
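
/*
 * TX work function.  HCI packets waiting on tx->hci_list are sent
 * first and as-is; otherwise pending SDUs are aggregated into a
 * freshly allocated TX_BUF_SIZE buffer.  Only one URB is in flight at
 * a time: send_complete gates the next submission and is set again
 * from gdm_usb_send_complete().
 */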
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	udev->send_complete = 0;

	if (!list_empty(&tx->hci_list)) {
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (!t) {
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		gdm_usb_send_complete(t->urb);
	}
}
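
/*
 * Queue one SDU for transmission.  The Ethernet header is stripped
 * for everything except ARP traffic, and SDU_PARAM_LEN bytes of SDU
 * parameters (dftEpsId, bearer_ID, nic_type) are accounted for in the
 * length written into the HCI header.  The packet is appended to
 * tx->sdu_list and the TX work is kicked.
 */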
#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dftEpsId, unsigned int epsId,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (!t_sdu) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
	sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
	sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}
static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (!t) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->callback = cb;
	t->cb_data = cb_data;
	t->len = len;
	t->tx = tx;
	t->is_sdu = 0;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}
static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
{
	struct lte_udev *udev = priv_dev;

	return &udev->gdm_ed;
}
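
/*
 * Probe: bind only to the network interface, wire the phy_dev
 * callbacks to the USB transport, preallocate the TX/RX pools, enable
 * autosuspend with remote wakeup, pick the device endianness by
 * product ID, and finally request the MAC address so the net device
 * can be registered once the reply arrives.
 */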
static int gdm_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* List devices known to be big endian here; everything else
	 * defaults to little endian.
	 */
	if (idProduct == PID_GDM7243)
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
	else
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);

	ret = request_mac_address(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "request MAC address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);

	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}
static void gdm_usb_disconnect(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	u16 idVendor, idProduct;
	struct usb_device *usbdev;

	usbdev = interface_to_usbdev(intf);

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;

	unregister_lte_device(phy_dev);

	release_usb(udev);

	kfree(udev);
	udev = NULL;

	kfree(phy_dev);
	phy_dev = NULL;

	usb_put_dev(usbdev);
}
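
/*
 * Runtime/system PM: suspend kills every in-flight RX URB (again
 * dropping the submit lock around usb_kill_urb()); resume refills the
 * RX ring back up to MAX_RX_SUBMIT_COUNT outstanding URBs using the
 * callback remembered in udev->rx_cb, then kicks the TX work for
 * anything that queued up while suspended.
 */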
static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	return 0;
}
static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}
static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};
static int __init gdm_usb_lte_init(void)
{
	int ret;

	if (gdm_lte_event_init() < 0) {
		pr_err("error creating event\n");
		return -1;
	}

	usb_tx_wq = create_workqueue("usb_tx_wq");
	if (!usb_tx_wq) {
		gdm_lte_event_exit();
		return -1;
	}

	usb_rx_wq = create_workqueue("usb_rx_wq");
	if (!usb_rx_wq) {
		destroy_workqueue(usb_tx_wq);
		gdm_lte_event_exit();
		return -1;
	}

	ret = usb_register(&gdm_usb_lte_driver);
	if (ret) {
		destroy_workqueue(usb_rx_wq);
		destroy_workqueue(usb_tx_wq);
		gdm_lte_event_exit();
	}

	return ret;
}
static void __exit gdm_usb_lte_exit(void)
{
	gdm_lte_event_exit();

	usb_deregister(&gdm_usb_lte_driver);

	if (usb_tx_wq) {
		flush_workqueue(usb_tx_wq);
		destroy_workqueue(usb_tx_wq);
	}

	if (usb_rx_wq) {
		flush_workqueue(usb_rx_wq);
		destroy_workqueue(usb_rx_wq);
	}
}
module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");