hci_h5.c 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765
  1. /*
  2. *
  3. * Bluetooth HCI Three-wire UART driver
  4. *
  5. * Copyright (C) 2012 Intel Corporation
  6. *
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. *
  22. */
  23. #include <linux/kernel.h>
  24. #include <linux/errno.h>
  25. #include <linux/skbuff.h>
  26. #include <net/bluetooth/bluetooth.h>
  27. #include <net/bluetooth/hci_core.h>
  28. #include "hci_uart.h"
  29. #define HCI_3WIRE_ACK_PKT 0
  30. #define HCI_3WIRE_LINK_PKT 15
  31. /* Sliding window size */
  32. #define H5_TX_WIN_MAX 4
  33. #define H5_ACK_TIMEOUT msecs_to_jiffies(250)
  34. #define H5_SYNC_TIMEOUT msecs_to_jiffies(100)
  35. /*
  36. * Maximum Three-wire packet:
  37. * 4 byte header + max value for 12-bit length + 2 bytes for CRC
  38. */
  39. #define H5_MAX_LEN (4 + 0xfff + 2)
  40. /* Convenience macros for reading Three-wire header values */
  41. #define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07)
  42. #define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07)
  43. #define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)
  44. #define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
  45. #define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
  46. #define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0xff) + ((hdr)[2] << 4))
  47. #define SLIP_DELIMITER 0xc0
  48. #define SLIP_ESC 0xdb
  49. #define SLIP_ESC_DELIM 0xdc
  50. #define SLIP_ESC_ESC 0xdd
/* H5 state flags */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};

/* Per-device Three-wire link state, hung off hci_uart->priv. */
struct h5 {
	struct sk_buff_head	unack;		/* Unack'ed packets queue */
	struct sk_buff_head	rel;		/* Reliable packets queue */
	struct sk_buff_head	unrel;		/* Unreliable packets queue */

	unsigned long		flags;		/* H5_RX_ESC / H5_TX_ACK_REQ */

	struct sk_buff		*rx_skb;	/* Receive buffer */
	size_t			rx_pending;	/* Expecting more bytes */
	u8			rx_ack;		/* Last ack number received */

	/* Handler for the current RX parsing stage (delimiter hunt,
	 * packet start, header, payload, CRC). */
	int			(*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list	timer;		/* Retransmission timer */

	u8			tx_seq;		/* Next seq number to send */
	u8			tx_ack;		/* Next ack number to send */
	u8			tx_win;		/* Sliding window size */

	/* Link establishment state machine: sync -> config -> active */
	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	/* Low-power (sleep/wakeup) protocol state */
	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;
};

static void h5_reset_rx(struct h5 *h5);
  81. static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
  82. {
  83. struct h5 *h5 = hu->priv;
  84. struct sk_buff *nskb;
  85. nskb = alloc_skb(3, GFP_ATOMIC);
  86. if (!nskb)
  87. return;
  88. bt_cb(nskb)->pkt_type = HCI_3WIRE_LINK_PKT;
  89. memcpy(skb_put(nskb, len), data, len);
  90. skb_queue_tail(&h5->unrel, nskb);
  91. }
  92. static u8 h5_cfg_field(struct h5 *h5)
  93. {
  94. u8 field = 0;
  95. /* Sliding window size (first 3 bits) */
  96. field |= (h5->tx_win & 7);
  97. return field;
  98. }
/*
 * Retransmission/sync timer callback. Depending on link state this
 * either re-sends the sync/config handshake, records that the peer
 * is asleep, or moves every unacked packet back onto the reliable
 * queue for retransmission.
 */
static void h5_timed_event(unsigned long arg)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct h5 *h5 = hu->priv;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		/* Keep retrying the handshake every H5_SYNC_TIMEOUT. */
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		/* Wakeup attempt timed out: treat the peer as asleep. */
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	/* SINGLE_DEPTH_NESTING: a queue lock may already be held on
	 * the TX path that triggered this timer. */
	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Requeue unacked packets newest-first so ordering is kept,
	 * rewinding tx_seq (mod 8) for each one. */
	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}
/*
 * The controller has rebooted (it sent a sync request while the link
 * was active): drop all queued traffic, restart the link handshake
 * from scratch and tell the HCI core to reset the device.
 */
static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	BT_ERR("Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	/* Sequence numbers restart from zero on a fresh link. */
	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}
  146. static int h5_open(struct hci_uart *hu)
  147. {
  148. struct h5 *h5;
  149. const unsigned char sync[] = { 0x01, 0x7e };
  150. BT_DBG("hu %p", hu);
  151. h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
  152. if (!h5)
  153. return -ENOMEM;
  154. hu->priv = h5;
  155. skb_queue_head_init(&h5->unack);
  156. skb_queue_head_init(&h5->rel);
  157. skb_queue_head_init(&h5->unrel);
  158. h5_reset_rx(h5);
  159. init_timer(&h5->timer);
  160. h5->timer.function = h5_timed_event;
  161. h5->timer.data = (unsigned long)hu;
  162. h5->tx_win = H5_TX_WIN_MAX;
  163. set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
  164. /* Send initial sync request */
  165. h5_link_control(hu, sync, sizeof(sync));
  166. mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
  167. return 0;
  168. }
  169. static int h5_close(struct hci_uart *hu)
  170. {
  171. struct h5 *h5 = hu->priv;
  172. del_timer_sync(&h5->timer);
  173. skb_queue_purge(&h5->unack);
  174. skb_queue_purge(&h5->rel);
  175. skb_queue_purge(&h5->unrel);
  176. kfree(h5);
  177. return 0;
  178. }
/*
 * Remove packets from the unack queue that the peer has now
 * acknowledged (h5->rx_ack). Counts backwards (mod 8) from tx_seq to
 * determine how many of the queued packets the ack covers, then frees
 * exactly that many from the head of the queue.
 */
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	/* Walk seq back from the next number to send until we meet the
	 * acked number; what remains in to_remove is the covered count. */
	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	/* The loop exhausted the queue without matching: the ack did
	 * not correspond to anything outstanding. */
	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	/* Nothing left outstanding: the retransmit timer can stop. */
	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}
/*
 * Process a received link-control packet and drive the
 * sync -> config -> active handshake as well as the sleep/wakeup
 * protocol. Any branch that falls through the if/else ladder has
 * queued a response and kicks the TX path at the end.
 */
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	/* Every link-control message is at least two bytes long. */
	if (H5_HDR_LEN(hdr) < 2)
		return;

	/* Fill in our window size: conf_req is used both for the
	 * 2-byte comparison below and for sending a 3-byte request. */
	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		/* A sync request on an active link means the peer reset. */
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		/* An optional third byte carries the peer's window size. */
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 7);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		/* Unrecognized link-control message: log and drop. */
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	hci_uart_tx_wakeup(hu);
}
/*
 * A complete packet (header + payload [+ CRC]) has been received:
 * schedule an ack for reliable packets, cull newly-acked packets
 * from the unack queue, then pass HCI data up the stack or handle
 * link-control packets internally.
 */
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		/* Advance our ack number (mod 8) and request that an
		 * ack be carried by the next outgoing packet. */
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		/* Ownership of rx_skb passes to the HCI core; clear it
		 * so h5_reset_rx() below won't free it. */
		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}
/*
 * RX stage: the trailing 2-byte CRC has been consumed. Note the CRC
 * bytes are skipped, not verified, before completing the packet.
 */
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}
  296. static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
  297. {
  298. struct h5 *h5 = hu->priv;
  299. const unsigned char *hdr = h5->rx_skb->data;
  300. if (H5_HDR_CRC(hdr)) {
  301. h5->rx_func = h5_rx_crc;
  302. h5->rx_pending = 2;
  303. } else {
  304. h5_complete_rx_pkt(hu);
  305. }
  306. return 0;
  307. }
/*
 * RX stage: all four header bytes are in. Validate the header
 * checksum, the sequence number and the link state, then switch to
 * payload reception.
 */
static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	/* hdr[3] is the one's complement of the sum of the first three
	 * bytes, so all four must sum to 0xff. */
	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	/* Reliable packets must arrive in sequence. */
	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	/* Only link-control packets are valid before the link is up. */
	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}
  337. static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
  338. {
  339. struct h5 *h5 = hu->priv;
  340. if (c == SLIP_DELIMITER)
  341. return 1;
  342. h5->rx_func = h5_rx_3wire_hdr;
  343. h5->rx_pending = 4;
  344. h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
  345. if (!h5->rx_skb) {
  346. BT_ERR("Can't allocate mem for new packet");
  347. h5_reset_rx(h5);
  348. return -ENOMEM;
  349. }
  350. h5->rx_skb->dev = (void *)hu->hdev;
  351. return 0;
  352. }
  353. static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
  354. {
  355. struct h5 *h5 = hu->priv;
  356. if (c == SLIP_DELIMITER)
  357. h5->rx_func = h5_rx_pkt_start;
  358. return 1;
  359. }
  360. static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
  361. {
  362. const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
  363. const u8 *byte = &c;
  364. if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
  365. set_bit(H5_RX_ESC, &h5->flags);
  366. return;
  367. }
  368. if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
  369. switch (c) {
  370. case SLIP_ESC_DELIM:
  371. byte = &delim;
  372. break;
  373. case SLIP_ESC_ESC:
  374. byte = &esc;
  375. break;
  376. default:
  377. BT_ERR("Invalid esc byte 0x%02hhx", c);
  378. h5_reset_rx(h5);
  379. return;
  380. }
  381. }
  382. memcpy(skb_put(h5->rx_skb, 1), byte, 1);
  383. h5->rx_pending--;
  384. BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
  385. }
  386. static void h5_reset_rx(struct h5 *h5)
  387. {
  388. if (h5->rx_skb) {
  389. kfree_skb(h5->rx_skb);
  390. h5->rx_skb = NULL;
  391. }
  392. h5->rx_func = h5_rx_delimiter;
  393. h5->rx_pending = 0;
  394. clear_bit(H5_RX_ESC, &h5->flags);
  395. }
/*
 * Feed received UART bytes through the packet state machine.
 * While rx_pending is non-zero, bytes are SLIP-unescaped straight
 * into rx_skb; otherwise the current stage handler (rx_func) decides
 * how many bytes to consume. Returns 0, or a negative error from a
 * stage handler (e.g. -ENOMEM from h5_rx_pkt_start).
 */
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			/* A delimiter mid-packet means truncation:
			 * resync WITHOUT consuming the byte so the
			 * delimiter handler sees it next iteration
			 * (h5_reset_rx() zeroes rx_pending). */
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}
  422. static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
  423. {
  424. struct h5 *h5 = hu->priv;
  425. if (skb->len > 0xfff) {
  426. BT_ERR("Packet too long (%u bytes)", skb->len);
  427. kfree_skb(skb);
  428. return 0;
  429. }
  430. if (h5->state != H5_ACTIVE) {
  431. BT_ERR("Ignoring HCI data in non-active state");
  432. kfree_skb(skb);
  433. return 0;
  434. }
  435. switch (bt_cb(skb)->pkt_type) {
  436. case HCI_ACLDATA_PKT:
  437. case HCI_COMMAND_PKT:
  438. skb_queue_tail(&h5->rel, skb);
  439. break;
  440. case HCI_SCODATA_PKT:
  441. skb_queue_tail(&h5->unrel, skb);
  442. break;
  443. default:
  444. BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
  445. kfree_skb(skb);
  446. break;
  447. }
  448. return 0;
  449. }
  450. static void h5_slip_delim(struct sk_buff *skb)
  451. {
  452. const char delim = SLIP_DELIMITER;
  453. memcpy(skb_put(skb, 1), &delim, 1);
  454. }
  455. static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
  456. {
  457. const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
  458. const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };
  459. switch (c) {
  460. case SLIP_DELIMITER:
  461. memcpy(skb_put(skb, 2), &esc_delim, 2);
  462. break;
  463. case SLIP_ESC:
  464. memcpy(skb_put(skb, 2), &esc_esc, 2);
  465. break;
  466. default:
  467. memcpy(skb_put(skb, 1), &c, 1);
  468. }
  469. }
  470. static bool valid_packet_type(u8 type)
  471. {
  472. switch (type) {
  473. case HCI_ACLDATA_PKT:
  474. case HCI_COMMAND_PKT:
  475. case HCI_SCODATA_PKT:
  476. case HCI_3WIRE_LINK_PKT:
  477. case HCI_3WIRE_ACK_PKT:
  478. return true;
  479. default:
  480. return false;
  481. }
  482. }
/*
 * Build a complete SLIP-framed Three-wire packet from @data: leading
 * delimiter, 4-byte header, escaped header+payload, trailing
 * delimiter. Advances tx_seq for reliable packets and clears the
 * pending-ack flag (this packet carries the ack). Returns a freshly
 * allocated skb, or NULL on invalid type or OOM.
 */
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	bt_cb(nskb)->pkt_type = pkt_type;

	h5_slip_delim(nskb);

	/* Byte 0 bits 3-5: ack number; the pending-ack request is
	 * satisfied by this packet. */
	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		/* Bit 7: reliable flag; bits 0-2: sequence number. */
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	/* Byte 1: packet type + low nibble of the 12-bit length.
	 * Byte 2: high 8 bits of the length.
	 * Byte 3: one's-complement checksum of bytes 0-2. */
	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}
/*
 * Pick the next packet to transmit, in priority order: a wakeup
 * request if the peer is asleep, then unreliable data, then reliable
 * data (sliding window permitting), then a standalone ack if one is
 * owed. Returns a SLIP-framed skb ready for the UART, or NULL.
 */
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		/* A wakeup request is already in flight. */
		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;

		BT_DBG("Sending wakeup request");

		/* Retry quickly (~10ms) if the peer doesn't answer. */
		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		/* OOM: put the packet back and try again later. */
		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	/* SINGLE_DEPTH_NESTING: rel/unrel queue locks may be held by
	 * the caller's context. */
	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Sliding window full: wait for acks before sending more. */
	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
				      skb->data, skb->len);
		if (nskb) {
			/* Keep the original until it's acked and arm
			 * the retransmission timer. */
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	/* Nothing else to send but an ack is owed: send a pure ack. */
	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}
/* Nothing to flush here: all queues are purged in h5_close(). */
static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}
/* Three-wire UART (H5) protocol operations. */
static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};
/* Register the H5 protocol with the HCI UART line discipline core. */
int __init h5_init(void)
{
	return hci_uart_register_proto(&h5p);
}
/* Unregister the H5 protocol from the HCI UART line discipline core. */
int __exit h5_deinit(void)
{
	return hci_uart_unregister_proto(&h5p);
}