ar-output.c

/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/circ_buf.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till packet resend (in jiffies).
 */
unsigned rxrpc_resend_timeout = 4 * HZ;

static int rxrpc_send_data(struct rxrpc_sock *rx,
                           struct rxrpc_call *call,
                           struct msghdr *msg, size_t len);

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
                              unsigned long *user_call_ID,
                              enum rxrpc_command *command,
                              u32 *abort_code,
                              bool server)
{
        struct cmsghdr *cmsg;
        int len;

        *command = RXRPC_CMD_SEND_DATA;

        if (msg->msg_controllen == 0)
                return -EINVAL;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
                _debug("CMSG %d, %d, %d",
                       cmsg->cmsg_level, cmsg->cmsg_type, len);

                if (cmsg->cmsg_level != SOL_RXRPC)
                        continue;

                switch (cmsg->cmsg_type) {
                case RXRPC_USER_CALL_ID:
                        if (msg->msg_flags & MSG_CMSG_COMPAT) {
                                if (len != sizeof(u32))
                                        return -EINVAL;
                                *user_call_ID = *(u32 *) CMSG_DATA(cmsg);
                        } else {
                                if (len != sizeof(unsigned long))
                                        return -EINVAL;
                                *user_call_ID = *(unsigned long *)
                                        CMSG_DATA(cmsg);
                        }
                        _debug("User Call ID %lx", *user_call_ID);
                        break;

                case RXRPC_ABORT:
                        if (*command != RXRPC_CMD_SEND_DATA)
                                return -EINVAL;
                        *command = RXRPC_CMD_SEND_ABORT;
                        if (len != sizeof(*abort_code))
                                return -EINVAL;
                        *abort_code = *(unsigned int *) CMSG_DATA(cmsg);
                        _debug("Abort %x", *abort_code);
                        if (*abort_code == 0)
                                return -EINVAL;
                        break;

                case RXRPC_ACCEPT:
                        if (*command != RXRPC_CMD_SEND_DATA)
                                return -EINVAL;
                        *command = RXRPC_CMD_ACCEPT;
                        if (len != 0)
                                return -EINVAL;
                        if (!server)
                                return -EISCONN;
                        break;

                default:
                        return -EINVAL;
                }
        }

        _leave(" = 0");
        return 0;
}

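/* Illustrative sketch (editor's addition, not part of the original file): a
 * userspace caller would typically tag a call with RXRPC_USER_CALL_ID via
 * sendmsg() control data on an AF_RXRPC socket, roughly along these lines
 * (socket setup, payload iovec and error handling omitted; the variable
 * names here are hypothetical):
 *
 *      unsigned long call_id = 1;
 *      char ctrl[CMSG_SPACE(sizeof(call_id))];
 *      struct msghdr msg = { .msg_control = ctrl,
 *                            .msg_controllen = sizeof(ctrl) };
 *      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *      cmsg->cmsg_level = SOL_RXRPC;
 *      cmsg->cmsg_type  = RXRPC_USER_CALL_ID;
 *      cmsg->cmsg_len   = CMSG_LEN(sizeof(call_id));
 *      memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 * rxrpc_sendmsg_cmsg() above is the kernel-side consumer of that convention.
 */
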
/*
 * abort a call, sending an ABORT packet to the peer
 */
static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
{
        write_lock_bh(&call->state_lock);

        if (call->state <= RXRPC_CALL_COMPLETE) {
                call->state = RXRPC_CALL_LOCALLY_ABORTED;
                call->abort_code = abort_code;
                set_bit(RXRPC_CALL_ABORT, &call->events);
                del_timer_sync(&call->resend_timer);
                del_timer_sync(&call->ack_timer);
                clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
                clear_bit(RXRPC_CALL_ACK, &call->events);
                clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
                rxrpc_queue_call(call);
        }

        write_unlock_bh(&call->state_lock);
}

/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_client_sendmsg(struct rxrpc_sock *rx, struct rxrpc_transport *trans,
                         struct msghdr *msg, size_t len)
{
        struct rxrpc_conn_bundle *bundle;
        enum rxrpc_command cmd;
        struct rxrpc_call *call;
        unsigned long user_call_ID = 0;
        struct key *key;
        __be16 service_id;
        u32 abort_code = 0;
        int ret;

        _enter("");

        ASSERT(trans != NULL);

        ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
                                 false);
        if (ret < 0)
                return ret;

        bundle = NULL;
        if (trans) {
                service_id = rx->service_id;
                if (msg->msg_name) {
                        DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx,
                                         msg->msg_name);
                        service_id = htons(srx->srx_service);
                }
                key = rx->key;
                if (key && !rx->key->payload.data[0])
                        key = NULL;
                bundle = rxrpc_get_bundle(rx, trans, key, service_id,
                                          GFP_KERNEL);
                if (IS_ERR(bundle))
                        return PTR_ERR(bundle);
        }

        call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
                                     abort_code == 0, GFP_KERNEL);
        if (trans)
                rxrpc_put_bundle(trans, bundle);
        if (IS_ERR(call)) {
                _leave(" = %ld", PTR_ERR(call));
                return PTR_ERR(call);
        }

        _debug("CALL %d USR %lx ST %d on CONN %p",
               call->debug_id, call->user_call_ID, call->state, call->conn);

        if (call->state >= RXRPC_CALL_COMPLETE) {
                /* it's too late for this call */
                ret = -ESHUTDOWN;
        } else if (cmd == RXRPC_CMD_SEND_ABORT) {
                rxrpc_send_abort(call, abort_code);
        } else if (cmd != RXRPC_CMD_SEND_DATA) {
                ret = -EINVAL;
        } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
                /* request phase complete for this client call */
                ret = -EPROTO;
        } else {
                ret = rxrpc_send_data(rx, call, msg, len);
        }

        rxrpc_put_call(call);
        _leave(" = %d", ret);
        return ret;
}

/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
                           size_t len)
{
        int ret;

        _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);

        ASSERTCMP(msg->msg_name, ==, NULL);
        ASSERTCMP(msg->msg_control, ==, NULL);

        lock_sock(&call->socket->sk);

        _debug("CALL %d USR %lx ST %d on CONN %p",
               call->debug_id, call->user_call_ID, call->state, call->conn);

        if (call->state >= RXRPC_CALL_COMPLETE) {
                ret = -ESHUTDOWN; /* it's too late for this call */
        } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
                   call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
                   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
                ret = -EPROTO; /* request phase complete for this client call */
        } else {
                ret = rxrpc_send_data(call->socket, call, msg, len);
        }

        release_sock(&call->socket->sk);
        _leave(" = %d", ret);
        return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);

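/* Illustrative sketch (editor's addition, not part of the original file): an
 * in-kernel user of this export - the AFS filesystem is the in-tree example -
 * would typically wrap its request buffer in a kvec-backed msghdr, flagging
 * MSG_MORE while further data is still to come.  Roughly (variable names are
 * hypothetical, and the iov_iter setup helper and its flags vary by kernel
 * version):
 *
 *      struct kvec iov = { .iov_base = request, .iov_len = request_len };
 *      struct msghdr msg;
 *
 *      memset(&msg, 0, sizeof(msg));
 *      iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, request_len);
 *      msg.msg_flags = more_to_come ? MSG_MORE : 0;
 *      ret = rxrpc_kernel_send_data(call, &msg, request_len);
 *
 * Omitting MSG_MORE on the final chunk is what ends the Tx phase, as the
 * kerneldoc above notes.
 */
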
/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 *
 * Allow a kernel service to abort a call, if it's still in an abortable state.
 */
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code)
{
        _enter("{%d},%d", call->debug_id, abort_code);

        lock_sock(&call->socket->sk);

        _debug("CALL %d USR %lx ST %d on CONN %p",
               call->debug_id, call->user_call_ID, call->state, call->conn);

        if (call->state < RXRPC_CALL_COMPLETE)
                rxrpc_send_abort(call, abort_code);

        release_sock(&call->socket->sk);
        _leave("");
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);

/*
 * send a message through a server socket
 * - caller holds the socket locked
 */
int rxrpc_server_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
{
        enum rxrpc_command cmd;
        struct rxrpc_call *call;
        unsigned long user_call_ID = 0;
        u32 abort_code = 0;
        int ret;

        _enter("");

        ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
                                 true);
        if (ret < 0)
                return ret;

        if (cmd == RXRPC_CMD_ACCEPT) {
                call = rxrpc_accept_call(rx, user_call_ID);
                if (IS_ERR(call))
                        return PTR_ERR(call);
                rxrpc_put_call(call);
                return 0;
        }

        call = rxrpc_find_server_call(rx, user_call_ID);
        if (!call)
                return -EBADSLT;
        if (call->state >= RXRPC_CALL_COMPLETE) {
                ret = -ESHUTDOWN;
                goto out;
        }

        switch (cmd) {
        case RXRPC_CMD_SEND_DATA:
                if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
                    call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
                    call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
                        /* Tx phase not yet begun for this call */
                        ret = -EPROTO;
                        break;
                }

                ret = rxrpc_send_data(rx, call, msg, len);
                break;

        case RXRPC_CMD_SEND_ABORT:
                rxrpc_send_abort(call, abort_code);
                break;
        default:
                BUG();
        }

out:
        rxrpc_put_call(call);
        _leave(" = %d", ret);
        return ret;
}

/*
 * send a packet through the transport endpoint
 */
int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
{
        struct kvec iov[1];
        struct msghdr msg;
        int ret, opt;

        _enter(",{%d}", skb->len);

        iov[0].iov_base = skb->head;
        iov[0].iov_len = skb->len;

        msg.msg_name = &trans->peer->srx.transport.sin;
        msg.msg_namelen = sizeof(trans->peer->srx.transport.sin);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        /* send the packet with the don't fragment bit set if we currently
         * think it's small enough */
        if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) {
                down_read(&trans->local->defrag_sem);
                /* send the packet by UDP
                 * - returns -EMSGSIZE if UDP would have to fragment the packet
                 *   to go out of the interface
                 *   - in which case, we'll have processed the ICMP error
                 *     message and updated the peer record
                 */
                ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
                                     iov[0].iov_len);

                up_read(&trans->local->defrag_sem);
                if (ret == -EMSGSIZE)
                        goto send_fragmentable;

                _leave(" = %d [%u]", ret, trans->peer->maxdata);
                return ret;
        }

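        /* Note on defrag_sem (editor's comment, inferred from the code above
         * and below): the fast path above takes the semaphore for reading
         * while sending with path-MTU discovery in force, whereas the
         * fragmentable path below takes it for writing.  That excludes
         * DF-path sends for the window in which IP_MTU_DISCOVER is
         * temporarily switched to IP_PMTUDISC_DONT on the shared local
         * socket.
         */
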
send_fragmentable:
        /* attempt to send this message with fragmentation enabled */
        _debug("send fragment");

        down_write(&trans->local->defrag_sem);
        opt = IP_PMTUDISC_DONT;
        ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER,
                                (char *) &opt, sizeof(opt));
        if (ret == 0) {
                ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
                                     iov[0].iov_len);

                opt = IP_PMTUDISC_DO;
                kernel_setsockopt(trans->local->socket, SOL_IP,
                                  IP_MTU_DISCOVER, (char *) &opt, sizeof(opt));
        }

        up_write(&trans->local->defrag_sem);
        _leave(" = %d [frag %u]", ret, trans->peer->maxdata);
        return ret;
}

/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
                                    struct rxrpc_call *call,
                                    long *timeo)
{
        DECLARE_WAITQUEUE(myself, current);
        int ret;

        _enter(",{%d},%ld",
               CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz),
               *timeo);

        add_wait_queue(&call->tx_waitq, &myself);

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                ret = 0;
                if (CIRC_SPACE(call->acks_head, call->acks_tail,
                               call->acks_winsz) > 0)
                        break;
                if (signal_pending(current)) {
                        ret = sock_intr_errno(*timeo);
                        break;
                }

                release_sock(&rx->sk);
                *timeo = schedule_timeout(*timeo);
                lock_sock(&rx->sk);
        }

        remove_wait_queue(&call->tx_waitq, &myself);
        set_current_state(TASK_RUNNING);
        _leave(" = %d", ret);
        return ret;
}

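/* Worked example (editor's comment): CIRC_SPACE() from <linux/circ_buf.h>
 * evaluates to ((tail) - ((head) + 1)) & ((size) - 1) for a power-of-two
 * ring.  With acks_winsz = 16, acks_head = 5 and acks_tail = 2 that gives
 * (2 - 6) & 15 = 12 free slots; one slot is always kept empty so that
 * head == tail unambiguously means "empty".
 */
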
/*
 * attempt to schedule an instant Tx resend
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call)
{
        read_lock_bh(&call->state_lock);
        if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
                clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
                if (call->state < RXRPC_CALL_COMPLETE &&
                    !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
                        rxrpc_queue_call(call);
        }
        read_unlock_bh(&call->state_lock);
}

/*
 * queue a packet for transmission, set the resend timer and attempt
 * to send the packet immediately
 */
static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
                               bool last)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        int ret;

        _net("queue skb %p [%d]", skb, call->acks_head);

        ASSERT(call->acks_window != NULL);
        call->acks_window[call->acks_head] = (unsigned long) skb;
        smp_wmb();
        call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1);

        if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
                _debug("________awaiting reply/ACK__________");
                write_lock_bh(&call->state_lock);
                switch (call->state) {
                case RXRPC_CALL_CLIENT_SEND_REQUEST:
                        call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
                        break;
                case RXRPC_CALL_SERVER_ACK_REQUEST:
                        call->state = RXRPC_CALL_SERVER_SEND_REPLY;
                        if (!last)
                                break;
                case RXRPC_CALL_SERVER_SEND_REPLY:
                        call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
                        break;
                default:
                        break;
                }
                write_unlock_bh(&call->state_lock);
        }

        _proto("Tx DATA %%%u { #%u }",
               ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));

        sp->need_resend = false;
        sp->resend_at = jiffies + rxrpc_resend_timeout;
        if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
                _debug("run timer");
                call->resend_timer.expires = sp->resend_at;
                add_timer(&call->resend_timer);
        }

        /* attempt to cancel the rx-ACK timer, deferring reply transmission if
         * we're ACK'ing the request phase of an incoming call */
        ret = -EAGAIN;
        if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
                /* the packet may be freed by rxrpc_process_call() before this
                 * returns */
                ret = rxrpc_send_packet(call->conn->trans, skb);
                _net("sent skb %p", skb);
        } else {
                _debug("failed to delete ACK timer");
        }

        if (ret < 0) {
                _debug("need instant resend %d", ret);
                sp->need_resend = true;
                rxrpc_instant_resend(call);
        }

        _leave("");
}

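/* Summary of the transitions made above (editor's comment): a client call
 * moves CLIENT_SEND_REQUEST -> CLIENT_AWAIT_REPLY once its last request
 * packet is queued; a server call moves SERVER_ACK_REQUEST ->
 * SERVER_SEND_REPLY as soon as it starts transmitting a reply, then falls
 * through (the missing break in that case is deliberate) to
 * SERVER_SEND_REPLY -> SERVER_AWAIT_ACK when the last reply packet goes out.
 */
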
/*
 * send data through a socket
 * - must be called in process context
 * - caller holds the socket locked
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
                           struct rxrpc_call *call,
                           struct msghdr *msg, size_t len)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        struct sock *sk = &rx->sk;
        long timeo;
        bool more;
        int ret, copied;

        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

        /* this should be in poll */
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                return -EPIPE;

        more = msg->msg_flags & MSG_MORE;

        skb = call->tx_pending;
        call->tx_pending = NULL;

        copied = 0;
        do {
                if (!skb) {
                        size_t size, chunk, max, space;

                        _debug("alloc");

                        if (CIRC_SPACE(call->acks_head, call->acks_tail,
                                       call->acks_winsz) <= 0) {
                                ret = -EAGAIN;
                                if (msg->msg_flags & MSG_DONTWAIT)
                                        goto maybe_error;
                                ret = rxrpc_wait_for_tx_window(rx, call,
                                                               &timeo);
                                if (ret < 0)
                                        goto maybe_error;
                        }

                        max = call->conn->trans->peer->maxdata;
                        max -= call->conn->security_size;
                        max &= ~(call->conn->size_align - 1UL);

                        chunk = max;
                        if (chunk > msg_data_left(msg) && !more)
                                chunk = msg_data_left(msg);

                        space = chunk + call->conn->size_align;
                        space &= ~(call->conn->size_align - 1UL);

                        size = space + call->conn->header_size;

                        _debug("SIZE: %zu/%zu/%zu", chunk, space, size);
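                        /* Worked example (editor's comment, with assumed
                         * numbers): if maxdata were 1412, security_size 8 and
                         * size_align 8, then max = (1412 - 8) & ~7 = 1400.
                         * Sending 100 bytes with no MSG_MORE gives
                         * chunk = 100, space = (100 + 8) & ~7 = 104 (room for
                         * padding up to the alignment) and size = 104 +
                         * header_size bytes of skb to allocate.
                         */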

                        /* create a buffer that we can retain until it's ACK'd */
                        skb = sock_alloc_send_skb(
                                sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
                        if (!skb)
                                goto maybe_error;

                        rxrpc_new_skb(skb);

                        _debug("ALLOC SEND %p", skb);

                        ASSERTCMP(skb->mark, ==, 0);

                        _debug("HS: %u", call->conn->header_size);
                        skb_reserve(skb, call->conn->header_size);
                        skb->len += call->conn->header_size;

                        sp = rxrpc_skb(skb);
                        sp->remain = chunk;
                        if (sp->remain > skb_tailroom(skb))
                                sp->remain = skb_tailroom(skb);

                        _net("skb: hr %d, tr %d, hl %d, rm %d",
                             skb_headroom(skb),
                             skb_tailroom(skb),
                             skb_headlen(skb),
                             sp->remain);

                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

                _debug("append");
                sp = rxrpc_skb(skb);

                /* append next segment of data to the current buffer */
                if (msg_data_left(msg) > 0) {
                        int copy = skb_tailroom(skb);
                        ASSERTCMP(copy, >, 0);
                        if (copy > msg_data_left(msg))
                                copy = msg_data_left(msg);
                        if (copy > sp->remain)
                                copy = sp->remain;

                        _debug("add");
                        ret = skb_add_data(skb, &msg->msg_iter, copy);
                        _debug("added");
                        if (ret < 0)
                                goto efault;
                        sp->remain -= copy;
                        skb->mark += copy;
                        copied += copy;
                }

                /* check for the far side aborting the call or a network error
                 * occurring */
                if (call->state > RXRPC_CALL_COMPLETE)
                        goto call_aborted;

                /* add the packet to the send queue if it's now full */
                if (sp->remain <= 0 ||
                    (msg_data_left(msg) == 0 && !more)) {
                        struct rxrpc_connection *conn = call->conn;
                        uint32_t seq;
                        size_t pad;

                        /* pad out if we're using security */
                        if (conn->security) {
                                pad = conn->security_size + skb->mark;
                                pad = conn->size_align - pad;
                                pad &= conn->size_align - 1;
                                _debug("pad %zu", pad);
                                if (pad)
                                        memset(skb_put(skb, pad), 0, pad);
                        }

                        seq = atomic_inc_return(&call->sequence);

                        sp->hdr.epoch = conn->epoch;
                        sp->hdr.cid = call->cid;
                        sp->hdr.callNumber = call->call_id;
                        sp->hdr.seq = htonl(seq);
                        sp->hdr.serial =
                                htonl(atomic_inc_return(&conn->serial));
                        sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
                        sp->hdr.userStatus = 0;
                        sp->hdr.securityIndex = conn->security_ix;
                        sp->hdr._rsvd = 0;
                        sp->hdr.serviceId = conn->service_id;

                        sp->hdr.flags = conn->out_clientflag;
                        if (msg_data_left(msg) == 0 && !more)
                                sp->hdr.flags |= RXRPC_LAST_PACKET;
                        else if (CIRC_SPACE(call->acks_head, call->acks_tail,
                                            call->acks_winsz) > 1)
                                sp->hdr.flags |= RXRPC_MORE_PACKETS;
                        if (more && seq & 1)
                                sp->hdr.flags |= RXRPC_REQUEST_ACK;

                        ret = rxrpc_secure_packet(
                                call, skb, skb->mark,
                                skb->head + sizeof(struct rxrpc_header));
                        if (ret < 0)
                                goto out;

                        memcpy(skb->head, &sp->hdr,
                               sizeof(struct rxrpc_header));
                        rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
                        skb = NULL;
                }
        } while (msg_data_left(msg) > 0);

success:
        ret = copied;
out:
        call->tx_pending = skb;
        _leave(" = %d", ret);
        return ret;

call_aborted:
        rxrpc_free_skb(skb);
        if (call->state == RXRPC_CALL_NETWORK_ERROR)
                ret = call->conn->trans->peer->net_error;
        else
                ret = -ECONNABORTED;
        _leave(" = %d", ret);
        return ret;

maybe_error:
        if (copied)
                goto success;
        goto out;

efault:
        ret = -EFAULT;
        goto out;
}