hci_sock.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531
  1. /*
  2. BlueZ - Bluetooth protocol stack for Linux
  3. Copyright (C) 2000-2001 Qualcomm Incorporated
  4. Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License version 2 as
  7. published by the Free Software Foundation;
  8. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  9. OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  10. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
  11. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
  12. CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
  13. WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14. ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15. OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
  17. COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
  18. SOFTWARE IS DISCLAIMED.
  19. */
  20. /* Bluetooth HCI sockets. */
  21. #include <linux/export.h>
  22. #include <asm/unaligned.h>
  23. #include <net/bluetooth/bluetooth.h>
  24. #include <net/bluetooth/hci_core.h>
  25. #include <net/bluetooth/hci_mon.h>
  26. #include <net/bluetooth/mgmt.h>
  27. #include "mgmt_util.h"
  28. static LIST_HEAD(mgmt_chan_list);
  29. static DEFINE_MUTEX(mgmt_chan_list_lock);
  30. static atomic_t monitor_promisc = ATOMIC_INIT(0);
  31. /* ----- HCI socket interface ----- */
  32. /* Socket info */
/* Per-socket protocol info; struct bt_sock must be the first member so a
 * struct sock pointer can be cast directly via hci_pi().
 */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;
	struct hci_dev *hdev;		/* bound controller, NULL when unbound */
	struct hci_filter filter;	/* RAW-channel packet filter */
	__u32 cmsg_mask;		/* HCI_CMSG_* ancillary data to deliver */
	unsigned short channel;		/* HCI_CHANNEL_* the socket is bound to */
	unsigned long flags;		/* HCI_SOCK_* / HCI_MGMT_* flag bits */
};
  42. void hci_sock_set_flag(struct sock *sk, int nr)
  43. {
  44. set_bit(nr, &hci_pi(sk)->flags);
  45. }
  46. void hci_sock_clear_flag(struct sock *sk, int nr)
  47. {
  48. clear_bit(nr, &hci_pi(sk)->flags);
  49. }
  50. int hci_sock_test_flag(struct sock *sk, int nr)
  51. {
  52. return test_bit(nr, &hci_pi(sk)->flags);
  53. }
  54. unsigned short hci_sock_get_channel(struct sock *sk)
  55. {
  56. return hci_pi(sk)->channel;
  57. }
  58. static inline int hci_test_bit(int nr, const void *addr)
  59. {
  60. return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
  61. }
/* Security filter: describes which packet types, events and commands an
 * unprivileged RAW socket is allowed to see/send.
 */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;	/* allowed HCI packet types, one bit per type */
	__u32 event_mask[2];	/* allowed HCI events, 64-bit bitmap */
	/* allowed OCFs, one 128-bit bitmap per OGF */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
/* Default security filter applied to unprivileged RAW sockets.  The bitmap
 * constants whitelist individual packet types, events and command OCFs.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
/* Global list of open HCI sockets, protected by its embedded rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
/* Return true if @skb must NOT be delivered to RAW socket @sk according to
 * the socket's HCI filter (packet type mask, event mask and, optionally, a
 * specific command opcode).
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	/* First byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Opcode offsets into the event payload: presumably 3 skips the
	 * event header plus the num_hci_cmd byte for Command Complete, and
	 * 4 additionally skips the status byte for Command Status — confirm
	 * against the HCI event layouts.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
/* Send frame to RAW socket.
 *
 * Delivers @skb to every bound RAW or USER channel socket attached to
 * @hdev, except the socket the frame originated from.  A private copy with
 * the packet-type byte prepended is created lazily on first delivery and
 * then cloned per receiver.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* RAW sockets see the four core packet types only,
			 * subject to the per-socket filter.
			 */
			if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
			    bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel only mirrors traffic coming in from
			 * the controller, never outgoing commands.
			 */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Drop our reference to the lazily created copy (may be NULL). */
	kfree_skb(skb_copy);
}
/* Send frame to sockets with specific channel.
 *
 * Clones @skb to every bound socket on @channel that has socket flag @flag
 * set, skipping @skip_sk (may be NULL).
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket.
 *
 * Translates the HCI packet type (and direction for ACL/SCO) into a monitor
 * opcode, prepends a hci_mon_hdr and broadcasts the result to all trusted
 * sockets on the monitor channel.  Cheap early exit when no monitor socket
 * is in promiscuous mode.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		/* Unknown packet types are not forwarded to the monitor */
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
/* Build a monitor-channel control event skb for a device lifecycle @event
 * (register/unregister/setup/up/open/close).  Returns NULL for unknown
 * events, for HCI_DEV_SETUP on devices without a valid manufacturer, or on
 * allocation failure.  Caller owns the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* NOTE(review): fixed 8-byte copy of hdev->name — assumes the
		 * name fits and is suitably terminated; upstream later
		 * hardened this copy, confirm against current kernels.
		 */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len excludes the header itself */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
/* Replay the current state of all registered controllers to a freshly
 * bound monitor socket: a NEW_INDEX event per device, plus OPEN and
 * UP/SETUP events matching each device's present state.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Only running devices get OPEN (and possibly UP/SETUP) */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
/* Generate internal stack event.
 *
 * Builds a synthetic HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data and delivers it via hci_send_to_sock() (with @hdev possibly NULL,
 * as used for device add/remove notifications).
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Marked incoming so USER/RAW delivery rules treat it as RX */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
/* Notify sockets about an hdev lifecycle @event: forward it to the monitor
 * channel, generate a stack-internal event for up/down transitions, and on
 * unregister detach every socket still bound to the device.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			/* NOTE(review): detaching hdev under bh_lock_sock
			 * while other paths read hci_pi(sk)->hdev under
			 * lock_sock() looks racy; upstream reworked this
			 * area to fix a use-after-free (CVE-2021-3573) —
			 * confirm against current kernels before changing.
			 */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference taken at bind time */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
  392. static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
  393. {
  394. struct hci_mgmt_chan *c;
  395. list_for_each_entry(c, &mgmt_chan_list, list) {
  396. if (c->channel == channel)
  397. return c;
  398. }
  399. return NULL;
  400. }
  401. static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
  402. {
  403. struct hci_mgmt_chan *c;
  404. mutex_lock(&mgmt_chan_list_lock);
  405. c = __hci_mgmt_chan_find(channel);
  406. mutex_unlock(&mgmt_chan_list_lock);
  407. return c;
  408. }
  409. int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
  410. {
  411. if (c->channel < HCI_CHANNEL_CONTROL)
  412. return -EINVAL;
  413. mutex_lock(&mgmt_chan_list_lock);
  414. if (__hci_mgmt_chan_find(c->channel)) {
  415. mutex_unlock(&mgmt_chan_list_lock);
  416. return -EALREADY;
  417. }
  418. list_add_tail(&c->list, &mgmt_chan_list);
  419. mutex_unlock(&mgmt_chan_list_lock);
  420. return 0;
  421. }
  422. EXPORT_SYMBOL(hci_mgmt_chan_register);
/* Remove a previously registered management channel handler. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
/* Release an HCI socket: undo monitor promiscuity, unlink from the global
 * socket list, detach (and for USER channel, shut down) the bound
 * controller, then orphan the socket and drop its queues.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
  465. static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
  466. {
  467. bdaddr_t bdaddr;
  468. int err;
  469. if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
  470. return -EFAULT;
  471. hci_dev_lock(hdev);
  472. err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
  473. hci_dev_unlock(hdev);
  474. return err;
  475. }
  476. static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
  477. {
  478. bdaddr_t bdaddr;
  479. int err;
  480. if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
  481. return -EFAULT;
  482. hci_dev_lock(hdev);
  483. err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
  484. hci_dev_unlock(hdev);
  485. return err;
  486. }
/* Ioctls that require bound socket.
 *
 * Called with the socket lock held by hci_sock_ioctl().  Rejects devices
 * that are user-channel exclusive, unconfigured, or not BR/EDR, then
 * dispatches the per-device ioctls (connection/auth info, blacklist).
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but keep the permission
		 * check so unprivileged callers get -EPERM, not -EOPNOTSUPP.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
/* Top-level ioctl handler for HCI sockets.  Only RAW channel sockets may
 * issue ioctls; device-independent commands run without the socket lock,
 * everything else falls through to hci_sock_bound_ioctl() with the lock
 * re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* NOTE(review): the socket lock is dropped here and re-taken below
	 * for bound ioctls; the channel check above is not re-validated
	 * after the gap — upstream later hardened this path, confirm.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
/* Bind an HCI socket to a channel (and, for RAW/USER, a controller).
 *
 * RAW: optionally attaches to a device and bumps its promisc count.
 * USER: takes exclusive control of a device (CAP_NET_ADMIN required),
 *       removing it from mgmt and powering it up.
 * MONITOR: CAP_NET_RAW required; replays current device state.
 * Other channels must be registered mgmt channels bound to no device.
 *
 * Runs fully under the socket lock; a socket already in BT_BOUND state is
 * rejected with -EALREADY.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs; unsupplied tail bytes read as zero */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* USER channel always needs a concrete device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse devices that are initializing, in setup/config, or
		 * already up for normal use (unless only kept up by the
		 * HCI_AUTO_OFF grace period).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				/* Roll back exclusive access on failure */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	/* Channel and state are committed last, under the socket lock */
	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
/* Report the local address of an HCI socket (device id + channel).
 * Peer names are not supported; an unbound socket yields -EBADFD.
 */
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}
/* Attach the ancillary data requested via the socket's cmsg_mask to a
 * received message: packet direction (HCI_CMSG_DIR) and/or the skb
 * timestamp (HCI_CMSG_TSTAMP, with compat_timeval for 32-bit compat
 * callers).
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;

		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit compat tasks expect the narrower timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
/* Datagram receive for HCI sockets.  Dequeues one skb, copies up to @len
 * bytes (flagging MSG_TRUNC on shortfall), and attaches per-channel
 * ancillary data: cmsg_mask-driven data on RAW, plain timestamps on
 * USER/MONITOR and registered mgmt channels.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
/* Dispatch one management-channel command received on an HCI socket.
 *
 * Copies the whole message into a kernel buffer, validates the mgmt
 * header against the channel's handler table and - when the command is
 * bound to a controller index - against the current state of that
 * hci_dev, then invokes the handler.
 *
 * Returns @msglen on success or a negative errno.  Validation failures
 * that produce a mgmt status reply propagate the mgmt_cmd_status()
 * result as the sendmsg() return value.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	/* Must contain at least a complete mgmt header. */
	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must match the actual payload size. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only call handlers explicitly marked as
	 * available to them via HCI_MGMT_UNTRUSTED.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		/* Takes a reference on hdev; dropped in the done path. */
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a user
		 * channel, are not addressable through mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept handlers flagged
		 * HCI_MGMT_UNCONFIGURED.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Commands flagged HCI_MGMT_NO_HDEV must come without an index,
	 * and all others must come with one.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Fixed-size commands must match data_len exactly; variable-
	 * length ones must carry at least the fixed prefix.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	/* Let the channel note which device this socket talked to. */
	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	/* Parameters start right after the header. */
	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
/* Transmit one frame (or mgmt command) on an HCI socket.
 *
 * Raw and user channels queue an skb to the controller; the monitor
 * channel is read-only; any other channel is looked up in the mgmt
 * channel list and dispatched through hci_mgmt_cmd().
 *
 * Returns @len on success or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Reject any flag outside the supported set. */
	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Smallest accepted frame is 4 bytes (packet type indicator plus
	 * the shortest header); largest is one HCI frame.
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets are receive-only. */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Everything else must be a registered mgmt channel. */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		/* Socket was never bound to a controller. */
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte of the frame is the packet type indicator; strip it
	 * off the payload and remember it in the control block.
	 */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter require
		 * CAP_NET_RAW.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) bypass the command
		 * queue and go out through the raw queue.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw ACL/SCO injection requires CAP_NET_RAW. */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
  1013. static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
  1014. char __user *optval, unsigned int len)
  1015. {
  1016. struct hci_ufilter uf = { .opcode = 0 };
  1017. struct sock *sk = sock->sk;
  1018. int err = 0, opt = 0;
  1019. BT_DBG("sk %p, opt %d", sk, optname);
  1020. lock_sock(sk);
  1021. if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
  1022. err = -EBADFD;
  1023. goto done;
  1024. }
  1025. switch (optname) {
  1026. case HCI_DATA_DIR:
  1027. if (get_user(opt, (int __user *)optval)) {
  1028. err = -EFAULT;
  1029. break;
  1030. }
  1031. if (opt)
  1032. hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
  1033. else
  1034. hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
  1035. break;
  1036. case HCI_TIME_STAMP:
  1037. if (get_user(opt, (int __user *)optval)) {
  1038. err = -EFAULT;
  1039. break;
  1040. }
  1041. if (opt)
  1042. hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
  1043. else
  1044. hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
  1045. break;
  1046. case HCI_FILTER:
  1047. {
  1048. struct hci_filter *f = &hci_pi(sk)->filter;
  1049. uf.type_mask = f->type_mask;
  1050. uf.opcode = f->opcode;
  1051. uf.event_mask[0] = *((u32 *) f->event_mask + 0);
  1052. uf.event_mask[1] = *((u32 *) f->event_mask + 1);
  1053. }
  1054. len = min_t(unsigned int, len, sizeof(uf));
  1055. if (copy_from_user(&uf, optval, len)) {
  1056. err = -EFAULT;
  1057. break;
  1058. }
  1059. if (!capable(CAP_NET_RAW)) {
  1060. uf.type_mask &= hci_sec_filter.type_mask;
  1061. uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
  1062. uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
  1063. }
  1064. {
  1065. struct hci_filter *f = &hci_pi(sk)->filter;
  1066. f->type_mask = uf.type_mask;
  1067. f->opcode = uf.opcode;
  1068. *((u32 *) f->event_mask + 0) = uf.event_mask[0];
  1069. *((u32 *) f->event_mask + 1) = uf.event_mask[1];
  1070. }
  1071. break;
  1072. default:
  1073. err = -ENOPROTOOPT;
  1074. break;
  1075. }
  1076. done:
  1077. release_sock(sk);
  1078. return err;
  1079. }
  1080. static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
  1081. char __user *optval, int __user *optlen)
  1082. {
  1083. struct hci_ufilter uf;
  1084. struct sock *sk = sock->sk;
  1085. int len, opt, err = 0;
  1086. BT_DBG("sk %p, opt %d", sk, optname);
  1087. if (get_user(len, optlen))
  1088. return -EFAULT;
  1089. lock_sock(sk);
  1090. if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
  1091. err = -EBADFD;
  1092. goto done;
  1093. }
  1094. switch (optname) {
  1095. case HCI_DATA_DIR:
  1096. if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
  1097. opt = 1;
  1098. else
  1099. opt = 0;
  1100. if (put_user(opt, optval))
  1101. err = -EFAULT;
  1102. break;
  1103. case HCI_TIME_STAMP:
  1104. if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
  1105. opt = 1;
  1106. else
  1107. opt = 0;
  1108. if (put_user(opt, optval))
  1109. err = -EFAULT;
  1110. break;
  1111. case HCI_FILTER:
  1112. {
  1113. struct hci_filter *f = &hci_pi(sk)->filter;
  1114. memset(&uf, 0, sizeof(uf));
  1115. uf.type_mask = f->type_mask;
  1116. uf.opcode = f->opcode;
  1117. uf.event_mask[0] = *((u32 *) f->event_mask + 0);
  1118. uf.event_mask[1] = *((u32 *) f->event_mask + 1);
  1119. }
  1120. len = min_t(unsigned int, len, sizeof(uf));
  1121. if (copy_to_user(optval, &uf, len))
  1122. err = -EFAULT;
  1123. break;
  1124. default:
  1125. err = -ENOPROTOOPT;
  1126. break;
  1127. }
  1128. done:
  1129. release_sock(sk);
  1130. return err;
  1131. }
  1132. static const struct proto_ops hci_sock_ops = {
  1133. .family = PF_BLUETOOTH,
  1134. .owner = THIS_MODULE,
  1135. .release = hci_sock_release,
  1136. .bind = hci_sock_bind,
  1137. .getname = hci_sock_getname,
  1138. .sendmsg = hci_sock_sendmsg,
  1139. .recvmsg = hci_sock_recvmsg,
  1140. .ioctl = hci_sock_ioctl,
  1141. .poll = datagram_poll,
  1142. .listen = sock_no_listen,
  1143. .shutdown = sock_no_shutdown,
  1144. .setsockopt = hci_sock_setsockopt,
  1145. .getsockopt = hci_sock_getsockopt,
  1146. .connect = sock_no_connect,
  1147. .socketpair = sock_no_socketpair,
  1148. .accept = sock_no_accept,
  1149. .mmap = sock_no_mmap
  1150. };
/* Backing protocol for HCI sockets.  obj_size makes sk_alloc() reserve
 * room for the per-socket hci_pinfo private area.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
/* Create a new PF_BLUETOOTH/BTPROTO_HCI socket.
 *
 * Only SOCK_RAW sockets are supported.  The new sock is initialized to
 * BT_OPEN state and linked into the global hci_sk_list.
 *
 * Returns 0 on success, -ESOCKTNOSUPPORT for a wrong socket type or
 * -ENOMEM when allocation fails.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* Generic sock setup must run before touching sk fields. */
	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	/* Add to the global list of HCI sockets. */
	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
/* Family registration hook: routes BTPROTO_HCI socket creation to
 * hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
/* Register the HCI socket layer: the proto, the BTPROTO_HCI family
 * entry and the /proc/net/bluetooth "hci" file.  Partially completed
 * registration is unwound on failure.
 *
 * Returns 0 on success or a negative errno.
 */
int __init hci_sock_init(void)
{
	int err;

	/* sockaddr_hci must fit inside the generic sockaddr that the
	 * socket syscalls copy around.
	 */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* procfs failed: drop the family registration before the
		 * common error path unregisters the proto.
		 */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
/* Tear down the HCI socket layer in reverse order of hci_sock_init():
 * proc file, then the BTPROTO_HCI family entry, then the proto.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}