ar-call.c

/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);
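
/*
 * DEFINE_HASHTABLE(name, bits) sizes the table to 2^bits buckets, so
 * rxrpc_call_hash has 1024 buckets.  Additions and removals serialise on
 * rxrpc_call_hash_lock; lookups walk a bucket locklessly under RCU.
 */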

/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(u8 clientflag, __be32 cid,
					 __be32 call_id, __be32 epoch,
					 __be16 service_id, sa_family_t proto,
					 void *localptr, unsigned int addr_size,
					 const u8 *peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;
	u32 hcid = ntohl(cid);

	_enter("");

	key = (unsigned long)localptr;
	/* We just want to add up the __be32 values, so forcing the
	 * cast should be okay.
	 */
	key += (__force u32)epoch;
	key += (__force u16)service_id;
	key += (__force u32)call_id;
	key += (hcid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += hcid & RXRPC_CHANNELMASK;
	key += clientflag;
	key += proto;

	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;

	_leave(" key = 0x%lx", key);
	return key;
}
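
/*
 * The additive hash above is cheap but collision-prone, so a matching
 * key never identifies a call on its own: rxrpc_find_call_hash() below
 * still rechecks every keyed field.  An illustrative lookup (all values
 * hypothetical) might be:
 *
 *	call = rxrpc_find_call_hash(0, cid, call_id, epoch, service_id,
 *				    local, AF_INET,
 *				    (u8 *)&peer_sin->sin_addr);
 */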

/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
	unsigned long key;
	unsigned int addr_size = 0;

	_enter("");

	switch (call->proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
				  call->call_id, call->epoch,
				  call->service_id, call->proto,
				  call->conn->trans->local, addr_size,
				  call->peer_ip.ipv6_addr);

	/* Store the full key in the call */
	call->hash_key = key;
	spin_lock(&rxrpc_call_hash_lock);
	hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
	_enter("");
	spin_lock(&rxrpc_call_hash_lock);
	hash_del_rcu(&call->hash_node);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}
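
/*
 * Note that both address families are hashed and compared through
 * peer_ip.ipv6_addr: peer_ip is presumably a union, so for AF_INET only
 * the first sizeof(ipv4_addr) bytes are meaningful and addr_size limits
 * the hash walk and the memcmp() accordingly.
 */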

/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(u8 clientflag, __be32 cid,
					__be32 call_id, __be32 epoch,
					__be16 service_id, void *localptr,
					sa_family_t proto, const u8 *peer_addr)
{
	unsigned long key;
	unsigned int addr_size = 0;
	struct rxrpc_call *call = NULL;
	struct rxrpc_call *ret = NULL;

	_enter("");

	/* sizeof() is evaluated at compile time, so taking it through the
	 * NULL call pointer here never dereferences anything */
	switch (proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(clientflag, cid, call_id, epoch,
				  service_id, proto, localptr, addr_size,
				  peer_addr);

	hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
		if (call->hash_key == key &&
		    call->call_id == call_id &&
		    call->cid == cid &&
		    call->in_clientflag == clientflag &&
		    call->service_id == service_id &&
		    call->proto == proto &&
		    call->local == localptr &&
		    memcmp(call->peer_ip.ipv6_addr, peer_addr,
			   addr_size) == 0 &&
		    call->epoch == epoch) {
			ret = call;
			break;
		}
	}

	_leave(" = %p", ret);
	return ret;
}
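
/*
 * hash_for_each_possible_rcu() assumes its caller is in an RCU read-side
 * critical section (or otherwise keeps the entries pinned), so callers
 * of rxrpc_find_call_hash() are presumably expected to hold
 * rcu_read_lock() or to run in softirq context.
 */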

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}
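
/*
 * acks_winsz must remain a power of two: the Tx window is used as a
 * circular buffer and its tail is advanced with
 *
 *	call->acks_tail = (call->acks_tail + 1) & (call->acks_winsz - 1);
 *
 * (see rxrpc_cleanup_call() below), which only wraps correctly for
 * power-of-two sizes.  The 0xed memset appears to poison the not yet
 * inserted sock_node so that premature use shows up in debugging.
 */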

/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
	if (ret < 0) {
		/* undo the allocation and the sock_hold() above; freeing
		 * just the jar object would leak the ACK window and the
		 * socket reference */
		sock_put(&rx->sk);
		kfree(call->acks_window);
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	}
	call->epoch = call->conn->epoch;
	call->service_id = call->conn->service_id;
	call->in_clientflag = call->conn->in_clientflag;

	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	spin_lock(&call->conn->trans->peer->lock);
	list_add(&call->error_link, &call->conn->trans->peer->error_targets);
	spin_unlock(&call->conn->trans->peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}
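
/*
 * rxrpc_connect_call() is what attaches the call to a connection and
 * sets call->conn; that is presumably why the epoch, service_id and
 * in_clientflag copies above can be taken from call->conn only after it
 * has returned successfully.
 */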

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}
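
/*
 * The lookup/insert above is the classic optimistic pattern: search
 * under the read lock; on a miss, allocate a candidate with no lock held
 * (the allocation may sleep), then retake the lock for writing and
 * search again in case another thread inserted the same user_call_ID in
 * the meantime.  Exactly one of two racing candidates gets linked; the
 * loser is dropped via rxrpc_put_call().
 */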

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_header *hdr,
				       gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	__be32 call_id;

	_enter(",%d,,%x", conn->debug_id, gfp);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(gfp);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = ntohl(hdr->cid) & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			/* don't leak the window rxrpc_alloc_call() made */
			kfree(candidate->acks_window);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		/* The tree is sorted in order of the __be32 value without
		 * turning it into host order.
		 */
		if ((__force u32)call_id < (__force u32)call->call_id)
			p = &(*p)->rb_left;
		else if ((__force u32)call_id > (__force u32)call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	atomic_inc(&conn->usage);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->trans->peer->lock);
	list_add(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = conn->trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			conn->trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	default:
		break;
	}
	call->epoch = conn->epoch;
	call->service_id = conn->service_id;
	call->in_clientflag = conn->in_clientflag;

	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
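
/*
 * A connection multiplexes up to RXRPC_MAXCALLS (four) calls, one per
 * channel, with the channel number carried in the low bits of the
 * connection ID - hence "ntohl(hdr->cid) & RXRPC_CHANNELMASK" above.  A
 * channel may be reused once its previous call completes; the
 * per-connection rbtree of call IDs is what rejects a replay of an
 * already-torn-down call with -ECONNRESET.
 */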

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
					  unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	/* search the extant calls for one that matches the specified user
	 * ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
			/* fall through */
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
			       conn->avail_calls);
			BUG();
		}
	}
	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       ntohl(sp->hdr.serial),
			       ntohl(sp->hdr.seq));
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}
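
/*
 * Release doesn't free the call: the socket's reference is handed to the
 * deadspan timer, so the call lingers for rxrpc_dead_call_expiry jiffies
 * to soak up straggling packets before rxrpc_dead_call_expired() marks
 * it RXRPC_CALL_DEAD and drops that final reference.
 */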

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->abort_code = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}
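
/*
 * The unqualified write_lock() above is deliberate: this helper is
 * documented as running with softirqs already disabled (its caller below
 * takes rx->call_lock with read_lock_bh()), so the _bh lock variants
 * would be redundant here.
 */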

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}
	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		list_del(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	/* Remove the call from the hash */
	rxrpc_call_hash_del(call);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *) _skb);
			_debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
			rxrpc_free_skb((struct sk_buff *) _skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}
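
/*
 * CIRC_CNT(head, tail, size) from <linux/circ_buf.h> evaluates to
 * ((head) - (tail)) & ((size) - 1), i.e. the number of occupied slots in
 * a power-of-two circular buffer.  Each occupied slot above holds an skb
 * pointer whose bit 0 appears to be reserved as a flag, hence the "& ~1"
 * before the value is cast back to a pointer and freed.
 */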

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint
 * rather than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through */
		default:
			printk(KERN_ERR "RXRPC:"
			       " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				printk(KERN_ERR "RXRPC: Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				printk(KERN_ERR "RXRPC: OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against
 *   del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}
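
/*
 * All of the timer handlers above run in softirq context, so none does
 * real work directly: each just sets an event bit and queues the call's
 * work item, deferring the actual processing to rxrpc_process_call() in
 * the work queue.
 */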