hci_qca.c

/*
 *  Bluetooth Software UART Qualcomm protocol
 *
 *  HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
 *  protocol extension to H4.
 *
 *  Copyright (C) 2007 Texas Instruments, Inc.
 *  Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
 *
 *  Acknowledgements:
 *  This file is based on hci_ll.c, which was...
 *  Written by Ohad Ben-Cohen <ohad@bencohen.org>
 *  which was in turn based on hci_h4.c, which was written
 *  by Maxim Krasnyansky and Marcel Holtmann.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"
#include "btqca.h"

/* HCI_IBS protocol messages */
#define HCI_IBS_SLEEP_IND	0xFE
#define HCI_IBS_WAKE_IND	0xFD
#define HCI_IBS_WAKE_ACK	0xFC
#define HCI_MAX_IBS_SIZE	10

/* Controller states */
#define STATE_IN_BAND_SLEEP_ENABLED	1

#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
#define IBS_TX_IDLE_TIMEOUT_MS		2000
#define BAUDRATE_SETTLE_TIMEOUT_MS	300

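/* Summary of the IBS handshake as implemented below: before transmitting,
 * the host sends HCI_IBS_WAKE_IND and waits in HCI_IBS_TX_WAKING until the
 * controller answers with HCI_IBS_WAKE_ACK; the WAKE_IND is retransmitted
 * every IBS_WAKE_RETRANS_TIMEOUT_MS until acknowledged. Once awake, an idle
 * timer sends HCI_IBS_SLEEP_IND after IBS_TX_IDLE_TIMEOUT_MS of TX
 * inactivity. The controller drives the same handshake in the other
 * direction for the receive path.
 */
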
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,
	HCI_IBS_TX_WAKING,
	HCI_IBS_TX_AWAKE,
};

/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,
	HCI_IBS_RX_AWAKE,
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};

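/* The TX and RX paths vote for the UART clock independently: the clock is
 * switched on as soon as either side votes on, and switched off only once
 * both sides have voted off (see serial_clock_vote() below).
 */
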
struct qca_data {
	struct hci_uart *hu;
	struct sk_buff *rx_skb;
	struct sk_buff_head txq;
	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue	*/
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock	*/
	u8 tx_ibs_state;	/* HCI_IBS transmit side power state */
	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
	bool tx_vote;		/* Clock must be on for TX */
	bool rx_vote;		/* Clock must be on for RX */
	struct timer_list tx_idle_timer;
	u32 tx_idle_delay;
	struct timer_list wake_retrans_timer;
	u32 wake_retrans;
	struct workqueue_struct *workqueue;
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	unsigned long flags;

	/* For debugging purposes only */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};

static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be enabled on the
	 * host side to save power, or need other manual intervention.
	 * Add code to control the UART clock here if needed.
	 */
}

static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be disabled on the
	 * host side to save power, or need other manual intervention.
	 * Add code to turn the UART clock off here if needed.
	 */
}

/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
		       vote ? "true" : "false");

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
}

/* Builds and sends an HCI_IBS command packet.
 * These are very simple packets with only 1 cmd byte.
 */
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
	int err = 0;
	struct sk_buff *skb = NULL;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);

	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for HCI_IBS packet");
		return -ENOMEM;
	}

	/* Assign HCI_IBS type */
	*skb_put(skb, 1) = cmd;

	skb_queue_tail(&qca->txq, skb);

	return err;
}

static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock(&qca->hci_ibs_lock);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge device wake up,
	 * sending IBS message doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock(&qca->hci_ibs_lock);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_rx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p rx clock vote off", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
}

static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that the messages are queued to the tty driver, vote for the
	 * tty clocks to go off. It is up to the tty driver to keep the
	 * clocks on until tx is done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}

static void hci_ibs_tx_idle_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct qca_data *qca = hu->priv;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
		/* Fall through */

	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

static void hci_ibs_wake_retrans_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct qca_data *qca = hu->priv;
	unsigned long flags, retrans_delay;
	bool retransmit = false;

	BT_DBG("hu %p wake retransmit timeout in %d state",
	       hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		/* No WAKE_ACK, retransmit WAKE */
		retransmit = true;
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
			BT_ERR("Failed to send WAKE to device");
			break;
		}
		qca->ibs_sent_wakes++;
		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_AWAKE:
		/* Fall through */

	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	if (retransmit)
		hci_uart_tx_wakeup(hu);
}

/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
	struct qca_data *qca;

	BT_DBG("hu %p qca_open", hu);

	qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	spin_lock_init(&qca->hci_ibs_lock);
	qca->workqueue = create_singlethread_workqueue("qca_wq");
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);

	qca->hu = hu;

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	/* clocks actually on, but we start votes off */
	qca->tx_vote = false;
	qca->rx_vote = false;
	qca->flags = 0;

	qca->ibs_sent_wacks = 0;
	qca->ibs_sent_slps = 0;
	qca->ibs_sent_wakes = 0;
	qca->ibs_recv_wacks = 0;
	qca->ibs_recv_slps = 0;
	qca->ibs_recv_wakes = 0;
	qca->vote_last_jif = jiffies;
	qca->vote_on_ms = 0;
	qca->vote_off_ms = 0;
	qca->votes_on = 0;
	qca->votes_off = 0;
	qca->tx_votes_on = 0;
	qca->tx_votes_off = 0;
	qca->rx_votes_on = 0;
	qca->rx_votes_off = 0;

	hu->priv = qca;

	init_timer(&qca->wake_retrans_timer);
	qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
	qca->wake_retrans_timer.data = (u_long)hu;
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	init_timer(&qca->tx_idle_timer);
	qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
	qca->tx_idle_timer.data = (u_long)hu;
	qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}

static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = S_IRUGO;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write */
	mode = S_IRUGO | S_IWUSR;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}

/* Flush protocol data */
static int qca_flush(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca flush", hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);

	return 0;
}

/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca close", hu);

	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	del_timer(&qca->tx_idle_timer);
	del_timer(&qca->wake_retrans_timer);
	destroy_workqueue(qca->workqueue);
	qca->hu = NULL;

	kfree_skb(qca->rx_skb);

	hu->priv = NULL;

	kfree(qca);

	return 0;
}

/* Called upon a wake-up-indication from the device.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure the clock is on - we may have turned it off
		 * since receiving the wake-up indication. Wake up the rx
		 * clock from the workqueue.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Called upon a sleep-indication from the device.
 */
static void device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to sleep", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_slps++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_AWAKE:
		/* Update state */
		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
		/* Vote off rx clock under workqueue */
		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
		break;

	case HCI_IBS_RX_ASLEEP:
		/* Fall through */

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

/* Called upon wake-up-acknowledgement from the device
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
		/* Fall through */

	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Enqueue frame for transmission (padding, crc, etc); may be called from
 * two simultaneous tasklets.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

	/* Don't go to sleep in the middle of a patch download, or when
	 * Out-Of-Band (GPIO-controlled) sleep is selected.
	 */
	if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
		skb_queue_tail(&qca->txq, skb);
		return 0;
	}

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Save packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}

static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);

	device_want_to_sleep(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);

	device_want_to_wakeup(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);

	device_woke_up(hu);

	kfree_skb(skb);
	return 0;
}

#define QCA_IBS_SLEEP_IND_EVENT \
	.type = HCI_IBS_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
	.type = HCI_IBS_WAKE_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
	.type = HCI_IBS_WAKE_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

static const struct h4_recv_pkt qca_recv_pkts[] = {
	{ H4_RECV_ACL,             .recv = hci_recv_frame    },
	{ H4_RECV_SCO,             .recv = hci_recv_frame    },
	{ H4_RECV_EVENT,           .recv = hci_recv_frame    },
	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};

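/* The IBS messages are single-byte packets: the packet type indicator is
 * the whole message, so hlen, loff and lsize are all zero above and
 * h4_recv_buf() dispatches them alongside the standard H4 ACL, SCO and
 * event packets.
 */
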
static int qca_recv(struct hci_uart *hu, const void *data, int count)
{
	struct qca_data *qca = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);
		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
		qca->rx_skb = NULL;
		return err;
	}

	return count;
}

static struct sk_buff *qca_dequeue(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	return skb_dequeue(&qca->txq);
}

static uint8_t qca_get_baudrate_value(int speed)
{
	switch (speed) {
	case 9600:
		return QCA_BAUDRATE_9600;
	case 19200:
		return QCA_BAUDRATE_19200;
	case 38400:
		return QCA_BAUDRATE_38400;
	case 57600:
		return QCA_BAUDRATE_57600;
	case 115200:
		return QCA_BAUDRATE_115200;
	case 230400:
		return QCA_BAUDRATE_230400;
	case 460800:
		return QCA_BAUDRATE_460800;
	case 500000:
		return QCA_BAUDRATE_500000;
	case 921600:
		return QCA_BAUDRATE_921600;
	case 1000000:
		return QCA_BAUDRATE_1000000;
	case 2000000:
		return QCA_BAUDRATE_2000000;
	case 3000000:
		return QCA_BAUDRATE_3000000;
	case 3500000:
		return QCA_BAUDRATE_3500000;
	default:
		return QCA_BAUDRATE_115200;
	}
}

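/* The baud rate is changed with a vendor-specific HCI command: cmd[] below
 * is an H4-framed command packet (leading 0x01 packet type), opcode 0xFC48
 * encoded little-endian (0x48, 0xFC), a one-byte parameter length, and the
 * QCA_BAUDRATE_* code filled into cmd[4]. The skb is queued directly on
 * qca->txq, so qca_enqueue() does not prepend another type byte.
 */
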
static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb;
	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };

	if (baudrate > QCA_BAUDRATE_3000000)
		return -EINVAL;

	cmd[4] = baudrate;

	skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for baudrate packet");
		return -ENOMEM;
	}

	/* Assign commands to change baudrate and packet type. */
	memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* Wait 300 ms for the controller to switch to the new baud rate.
	 * The controller responds only after it has received this HCI
	 * command, and from then on the host must talk to it at the new
	 * baud rate.
	 */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
	set_current_state(TASK_RUNNING);

	return 0;
}

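/* qca_setup() runs the ROME patch and NVM download with in-band sleep
 * disabled (STATE_IN_BAND_SLEEP_ENABLED cleared), and only re-enables IBS
 * and registers the debugfs entries once the download succeeds.
 */
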
static int qca_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_data *qca = hu->priv;
	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
	int ret;

	BT_INFO("%s: ROME setup", hdev->name);

	/* Patch downloading has to be done without IBS mode */
	clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);

	/* Setup initial baudrate */
	speed = 0;
	if (hu->init_speed)
		speed = hu->init_speed;
	else if (hu->proto->init_speed)
		speed = hu->proto->init_speed;

	if (speed)
		hci_uart_set_baudrate(hu, speed);

	/* Setup user speed if needed */
	speed = 0;
	if (hu->oper_speed)
		speed = hu->oper_speed;
	else if (hu->proto->oper_speed)
		speed = hu->proto->oper_speed;

	if (speed) {
		qca_baudrate = qca_get_baudrate_value(speed);

		BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
		ret = qca_set_baudrate(hdev, qca_baudrate);
		if (ret) {
			BT_ERR("%s: Failed to change the baud rate (%d)",
			       hdev->name, ret);
			return ret;
		}
		hci_uart_set_baudrate(hu, speed);
	}

	/* Setup patch / NVM configurations */
	ret = qca_uart_setup_rome(hdev, qca_baudrate);
	if (!ret) {
		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
		qca_debugfs_init(hdev);
	} else if (ret == -ENOENT) {
		/* No patch/nvm-config found, run with original fw/config */
		ret = 0;
	} else if (ret == -EAGAIN) {
		/*
		 * Userspace firmware loader will return -EAGAIN in case no
		 * patch/nvm-config is found, so run with original fw/config.
		 */
		ret = 0;
	}

	/* Setup bdaddr */
	hu->hdev->set_bdaddr = qca_set_bdaddr_rome;

	return ret;
}

static struct hci_uart_proto qca_proto = {
	.id		= HCI_UART_QCA,
	.name		= "QCA",
	.manufacturer	= 29,
	.init_speed	= 115200,
	.oper_speed	= 3000000,
	.open		= qca_open,
	.close		= qca_close,
	.flush		= qca_flush,
	.setup		= qca_setup,
	.recv		= qca_recv,
	.enqueue	= qca_enqueue,
	.dequeue	= qca_dequeue,
};

int __init qca_init(void)
{
	return hci_uart_register_proto(&qca_proto);
}

int __exit qca_deinit(void)
{
	return hci_uart_unregister_proto(&qca_proto);
}