/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  11. #include <linux/module.h>
  12. #include <linux/net.h>
  13. #include <linux/skbuff.h>
  14. #include <linux/errqueue.h>
  15. #include <linux/udp.h>
  16. #include <linux/in.h>
  17. #include <linux/in6.h>
  18. #include <linux/icmp.h>
  19. #include <linux/gfp.h>
  20. #include <net/sock.h>
  21. #include <net/af_rxrpc.h>
  22. #include <net/ip.h>
  23. #include "ar-internal.h"
  24. /*
  25. * generate a connection-level abort
  26. */
  27. static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
  28. struct rxrpc_header *hdr)
  29. {
  30. struct msghdr msg;
  31. struct kvec iov[1];
  32. size_t len;
  33. int ret;
  34. _enter("%d,,", local->debug_id);
  35. msg.msg_name = &srx->transport.sin;
  36. msg.msg_namelen = sizeof(srx->transport.sin);
  37. msg.msg_control = NULL;
  38. msg.msg_controllen = 0;
  39. msg.msg_flags = 0;
  40. hdr->seq = 0;
  41. hdr->type = RXRPC_PACKET_TYPE_BUSY;
  42. hdr->flags = 0;
  43. hdr->userStatus = 0;
  44. hdr->_rsvd = 0;
  45. iov[0].iov_base = hdr;
  46. iov[0].iov_len = sizeof(*hdr);
  47. len = iov[0].iov_len;
  48. hdr->serial = htonl(1);
  49. _proto("Tx BUSY %%%u", ntohl(hdr->serial));
  50. ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
  51. if (ret < 0) {
  52. _leave(" = -EAGAIN [sendmsg failed: %d]", ret);
  53. return -EAGAIN;
  54. }
  55. _leave(" = 0");
  56. return 0;
  57. }
  58. /*
  59. * accept an incoming call that needs peer, transport and/or connection setting
  60. * up
  61. */
  62. static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
  63. struct rxrpc_sock *rx,
  64. struct sk_buff *skb,
  65. struct sockaddr_rxrpc *srx)
  66. {
  67. struct rxrpc_connection *conn;
  68. struct rxrpc_transport *trans;
  69. struct rxrpc_skb_priv *sp, *nsp;
  70. struct rxrpc_peer *peer;
  71. struct rxrpc_call *call;
  72. struct sk_buff *notification;
  73. int ret;
  74. _enter("");
  75. sp = rxrpc_skb(skb);
  76. /* get a notification message to send to the server app */
  77. notification = alloc_skb(0, GFP_NOFS);
  78. if (!notification) {
  79. _debug("no memory");
  80. ret = -ENOMEM;
  81. goto error_nofree;
  82. }
  83. rxrpc_new_skb(notification);
  84. notification->mark = RXRPC_SKB_MARK_NEW_CALL;
  85. peer = rxrpc_get_peer(srx, GFP_NOIO);
  86. if (IS_ERR(peer)) {
  87. _debug("no peer");
  88. ret = -EBUSY;
  89. goto error;
  90. }
  91. trans = rxrpc_get_transport(local, peer, GFP_NOIO);
  92. rxrpc_put_peer(peer);
  93. if (IS_ERR(trans)) {
  94. _debug("no trans");
  95. ret = -EBUSY;
  96. goto error;
  97. }
  98. conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO);
  99. rxrpc_put_transport(trans);
  100. if (IS_ERR(conn)) {
  101. _debug("no conn");
  102. ret = PTR_ERR(conn);
  103. goto error;
  104. }
  105. call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO);
  106. rxrpc_put_connection(conn);
  107. if (IS_ERR(call)) {
  108. _debug("no call");
  109. ret = PTR_ERR(call);
  110. goto error;
  111. }
  112. /* attach the call to the socket */
  113. read_lock_bh(&local->services_lock);
  114. if (rx->sk.sk_state == RXRPC_CLOSE)
  115. goto invalid_service;
  116. write_lock(&rx->call_lock);
  117. if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
  118. rxrpc_get_call(call);
  119. spin_lock(&call->conn->state_lock);
  120. if (sp->hdr.securityIndex > 0 &&
  121. call->conn->state == RXRPC_CONN_SERVER_UNSECURED) {
  122. _debug("await conn sec");
  123. list_add_tail(&call->accept_link, &rx->secureq);
  124. call->conn->state = RXRPC_CONN_SERVER_CHALLENGING;
  125. atomic_inc(&call->conn->usage);
  126. set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events);
  127. rxrpc_queue_conn(call->conn);
  128. } else {
  129. _debug("conn ready");
  130. call->state = RXRPC_CALL_SERVER_ACCEPTING;
  131. list_add_tail(&call->accept_link, &rx->acceptq);
  132. rxrpc_get_call(call);
  133. nsp = rxrpc_skb(notification);
  134. nsp->call = call;
  135. ASSERTCMP(atomic_read(&call->usage), >=, 3);
  136. _debug("notify");
  137. spin_lock(&call->lock);
  138. ret = rxrpc_queue_rcv_skb(call, notification, true,
  139. false);
  140. spin_unlock(&call->lock);
  141. notification = NULL;
  142. BUG_ON(ret < 0);
  143. }
  144. spin_unlock(&call->conn->state_lock);
  145. _debug("queued");
  146. }
  147. write_unlock(&rx->call_lock);
  148. _debug("process");
  149. rxrpc_fast_process_packet(call, skb);
  150. _debug("done");
  151. read_unlock_bh(&local->services_lock);
  152. rxrpc_free_skb(notification);
  153. rxrpc_put_call(call);
  154. _leave(" = 0");
  155. return 0;
  156. invalid_service:
  157. _debug("invalid");
  158. read_unlock_bh(&local->services_lock);
  159. read_lock_bh(&call->state_lock);
  160. if (!test_bit(RXRPC_CALL_RELEASE, &call->flags) &&
  161. !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) {
  162. rxrpc_get_call(call);
  163. rxrpc_queue_call(call);
  164. }
  165. read_unlock_bh(&call->state_lock);
  166. rxrpc_put_call(call);
  167. ret = -ECONNREFUSED;
  168. error:
  169. rxrpc_free_skb(notification);
  170. error_nofree:
  171. _leave(" = %d", ret);
  172. return ret;
  173. }
/*
 * accept incoming calls that need peer, transport and/or connection setting up
 * - the packets we get are all incoming client DATA packets that have seq == 1
 *
 * Work item run off local->acceptor: drains local->accept_queue, looking up
 * the target service socket for each packet and handing it to
 * rxrpc_accept_incoming_call(); packets that can't be serviced are answered
 * with BUSY or a reject/abort packet.
 */
void rxrpc_accept_incoming_calls(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, acceptor);
	struct rxrpc_skb_priv *sp;
	struct sockaddr_rxrpc srx;
	struct rxrpc_sock *rx;
	struct sk_buff *skb;
	__be16 service_id;
	int ret;

	_enter("%d", local->debug_id);

	/* pin the local endpoint for the duration of the drain; bail out if
	 * it is already on its way to destruction */
	read_lock_bh(&rxrpc_local_lock);
	if (atomic_read(&local->usage) > 0)
		rxrpc_get_local(local);
	else
		local = NULL;
	read_unlock_bh(&rxrpc_local_lock);
	if (!local) {
		_leave(" [local dead]");
		return;
	}

process_next_packet:
	skb = skb_dequeue(&local->accept_queue);
	if (!skb) {
		/* queue drained - drop our ref and stop */
		rxrpc_put_local(local);
		_leave("\n");
		return;
	}

	_net("incoming call skb %p", skb);

	sp = rxrpc_skb(skb);

	/* determine the remote address */
	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.transport.family = local->srx.transport.family;
	srx.transport_type = local->srx.transport_type;
	switch (srx.transport.family) {
	case AF_INET:
		srx.transport_len = sizeof(struct sockaddr_in);
		srx.transport.sin.sin_port = udp_hdr(skb)->source;
		srx.transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		break;
	default:
		/* unsupported address family - answer with BUSY */
		goto busy;
	}

	/* get the socket providing the service */
	service_id = sp->hdr.serviceId;
	read_lock_bh(&local->services_lock);
	list_for_each_entry(rx, &local->services, listen_link) {
		if (rx->service_id == service_id &&
		    rx->sk.sk_state != RXRPC_CLOSE)
			goto found_service;
	}
	read_unlock_bh(&local->services_lock);
	goto invalid_service;

found_service:
	/* NB: services_lock is still held here and across backlog_full */
	_debug("found service %hd", ntohs(rx->service_id));
	if (sk_acceptq_is_full(&rx->sk))
		goto backlog_full;
	sk_acceptq_added(&rx->sk);
	sock_hold(&rx->sk);
	read_unlock_bh(&local->services_lock);

	ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
	if (ret < 0)
		sk_acceptq_removed(&rx->sk);
	sock_put(&rx->sk);
	switch (ret) {
	case -ECONNRESET: /* old calls are ignored */
	case -ECONNABORTED: /* aborted calls are reaborted or ignored */
	case 0:
		goto process_next_packet;
	case -ECONNREFUSED:
		goto invalid_service;
	case -EBUSY:
		goto busy;
	case -EKEYREJECTED:
		goto security_mismatch;
	default:
		BUG();
	}

backlog_full:
	read_unlock_bh(&local->services_lock);
busy:
	rxrpc_busy(local, &srx, &sp->hdr);
	rxrpc_free_skb(skb);
	goto process_next_packet;

invalid_service:
	skb->priority = RX_INVALID_OPERATION;
	rxrpc_reject_packet(local, skb);
	goto process_next_packet;

	/* can't change connection security type mid-flow */
security_mismatch:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	goto process_next_packet;
}
/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 *
 * Returns the accepted call (with an extra ref held for the caller) or an
 * ERR_PTR: -ENODATA if the accept queue is empty, -EBADSLT if user_call_ID
 * is already in use, or an error reflecting the call's state if it died
 * while queued.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* check the user ID isn't already in use */
	ret = -EBADSLT;
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);
		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto out;	/* duplicate user call ID */
	}

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_LOCALLY_ABORTED:
		ret = -ECONNABORTED;
		goto out_release;
	case RXRPC_CALL_NETWORK_ERROR:
		ret = call->conn->error;
		goto out_release;
	case RXRPC_CALL_DEAD:
		ret = -ETIME;
		goto out_discard;
	default:
		BUG();
	}

	/* formalise the acceptance: index the call by user ID and kick the
	 * call processor to handle the ACCEPTED event */
	call->user_call_ID = user_call_ID;
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();
	if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events))
		BUG();
	rxrpc_queue_call(call);

	rxrpc_get_call(call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

	/* if the call is already dying or dead, then we leave the socket's ref
	 * on it to be released by rxrpc_dead_call_expired() as induced by
	 * rxrpc_release_call() */
out_release:
	_debug("release %p", call);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
		rxrpc_queue_call(call);
out_discard:
	write_unlock_bh(&call->state_lock);
	_debug("discard %p", call);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
/*
 * handle rejectance of a call by userspace
 * - reject the call at the front of the queue
 *
 * Returns 0 if the call was marked busy for rejection, -ENODATA if the
 * accept queue is empty, or an error reflecting the call's state if it
 * died while queued.
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_BUSY;
		/* NOTE(review): queuing only when the bit was ALREADY set
		 * looks inverted relative to the usual !test_and_set_bit
		 * idiom used elsewhere in this file - TODO confirm intent */
		if (test_and_set_bit(RXRPC_CALL_REJECT_BUSY, &call->events))
			rxrpc_queue_call(call);
		ret = 0;
		goto out_release;
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_LOCALLY_ABORTED:
		ret = -ECONNABORTED;
		goto out_release;
	case RXRPC_CALL_NETWORK_ERROR:
		ret = call->conn->error;
		goto out_release;
	case RXRPC_CALL_DEAD:
		ret = -ETIME;
		goto out_discard;
	default:
		BUG();
	}

	/* if the call is already dying or dead, then we leave the socket's ref
	 * on it to be released by rxrpc_dead_call_expired() as induced by
	 * rxrpc_release_call() */
out_release:
	_debug("release %p", call);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
		rxrpc_queue_call(call);
out_discard:
	write_unlock_bh(&call->state_lock);
	_debug("discard %p", call);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ret;
}
  410. /**
  411. * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call
  412. * @sock: The socket on which the impending call is waiting
  413. * @user_call_ID: The tag to attach to the call
  414. *
  415. * Allow a kernel service to accept an incoming call, assuming the incoming
  416. * call is still valid.
  417. */
  418. struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
  419. unsigned long user_call_ID)
  420. {
  421. struct rxrpc_call *call;
  422. _enter(",%lx", user_call_ID);
  423. call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID);
  424. _leave(" = %p", call);
  425. return call;
  426. }
  427. EXPORT_SYMBOL(rxrpc_kernel_accept_call);
  428. /**
  429. * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call
  430. * @sock: The socket on which the impending call is waiting
  431. *
  432. * Allow a kernel service to reject an incoming call with a BUSY message,
  433. * assuming the incoming call is still valid.
  434. */
  435. int rxrpc_kernel_reject_call(struct socket *sock)
  436. {
  437. int ret;
  438. _enter("");
  439. ret = rxrpc_reject_call(rxrpc_sk(sock->sk));
  440. _leave(" = %d", ret);
  441. return ret;
  442. }
  443. EXPORT_SYMBOL(rxrpc_kernel_reject_call);