/* tx.c — 6LoWPAN over IEEE 802.15.4: transmit path */
  1. /* This program is free software; you can redistribute it and/or modify
  2. * it under the terms of the GNU General Public License version 2
  3. * as published by the Free Software Foundation.
  4. *
  5. * This program is distributed in the hope that it will be useful,
  6. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  7. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  8. * GNU General Public License for more details.
  9. */
  10. #include <net/6lowpan.h>
  11. #include <net/ieee802154_netdev.h>
  12. #include <net/mac802154.h>
  13. #include "6lowpan_i.h"
  14. #define LOWPAN_FRAG1_HEAD_SIZE 0x4
  15. #define LOWPAN_FRAGN_HEAD_SIZE 0x5
/* don't save pan id, it's intra pan */
struct lowpan_addr {
	/* ieee802154 addressing mode; only IEEE802154_ADDR_LONG is
	 * produced by this file (see lowpan_header_create())
	 */
	u8 mode;
	union {
		/* IPv6 needs big endian here */
		__be64 extended_addr;
		__be16 short_addr;
	} u;
};

/* Source/destination pair stashed in the skb headroom by
 * lowpan_header_create() and read back later by lowpan_header()
 * via lowpan_skb_priv().
 */
struct lowpan_addr_info {
	struct lowpan_addr daddr;
	struct lowpan_addr saddr;
};
  29. static inline struct
  30. lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
  31. {
  32. WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
  33. return (struct lowpan_addr_info *)(skb->data -
  34. sizeof(struct lowpan_addr_info));
  35. }
  36. /* This callback will be called from AF_PACKET and IPv6 stack, the AF_PACKET
  37. * sockets gives an 8 byte array for addresses only!
  38. *
  39. * TODO I think AF_PACKET DGRAM (sending/receiving) RAW (sending) makes no
  40. * sense here. We should disable it, the right use-case would be AF_INET6
  41. * RAW/DGRAM sockets.
  42. */
  43. int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
  44. unsigned short type, const void *_daddr,
  45. const void *_saddr, unsigned int len)
  46. {
  47. const u8 *saddr = _saddr;
  48. const u8 *daddr = _daddr;
  49. struct lowpan_addr_info *info;
  50. if (!daddr)
  51. return -EINVAL;
  52. /* TODO:
  53. * if this package isn't ipv6 one, where should it be routed?
  54. */
  55. if (type != ETH_P_IPV6)
  56. return 0;
  57. if (!saddr)
  58. saddr = ldev->dev_addr;
  59. raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
  60. raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
  61. info = lowpan_skb_priv(skb);
  62. /* TODO: Currently we only support extended_addr */
  63. info->daddr.mode = IEEE802154_ADDR_LONG;
  64. memcpy(&info->daddr.u.extended_addr, daddr,
  65. sizeof(info->daddr.u.extended_addr));
  66. info->saddr.mode = IEEE802154_ADDR_LONG;
  67. memcpy(&info->saddr.u.extended_addr, saddr,
  68. sizeof(info->daddr.u.extended_addr));
  69. return 0;
  70. }
/* Allocate an skb for one link-layer fragment, sized for @size bytes of
 * fragment data plus the wpan device's required head/tailroom.
 *
 * For the first fragment (@frag1 == true) the MAC header already built on
 * @skb is copied over verbatim; for later fragments a fresh MAC header is
 * generated from @master_hdr's source/destination via wpan_dev_hard_header().
 *
 * Returns the new skb, or an ERR_PTR() (-ENOMEM, or the hard_header error)
 * on failure.
 */
static struct sk_buff*
lowpan_alloc_frag(struct sk_buff *skb, int size,
		  const struct ieee802154_hdr *master_hdr, bool frag1)
{
	struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev;
	struct sk_buff *frag;
	int rc;

	frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
			 GFP_ATOMIC);
	if (likely(frag)) {
		frag->dev = wdev;
		frag->priority = skb->priority;
		skb_reserve(frag, wdev->needed_headroom);
		skb_reset_network_header(frag);

		/* carry the 802.15.4 control-block state over to the
		 * fragment (frame type, ackreq, ...)
		 */
		*mac_cb(frag) = *mac_cb(skb);

		if (frag1) {
			/* reuse the MAC header already present on @skb */
			memcpy(skb_put(frag, skb->mac_len),
			       skb_mac_header(skb), skb->mac_len);
		} else {
			rc = wpan_dev_hard_header(frag, wdev,
						  &master_hdr->dest,
						  &master_hdr->source, size);
			if (rc < 0) {
				kfree_skb(frag);
				return ERR_PTR(rc);
			}
		}
	} else {
		frag = ERR_PTR(-ENOMEM);
	}

	return frag;
}
/* Build and transmit a single fragment: fragment header (@frag_hdr,
 * @frag_hdrlen bytes) followed by @len bytes of @skb's network-layer data
 * starting at @offset.  Ownership of the fragment skb passes to
 * dev_queue_xmit(); returns its result, or a negative errno if allocation
 * failed.
 */
static int
lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
		     u8 *frag_hdr, int frag_hdrlen,
		     int offset, int len, bool frag1)
{
	struct sk_buff *frag;

	raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);

	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
	if (IS_ERR(frag))
		return PTR_ERR(frag);

	memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
	memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);

	raw_dump_table(__func__, " fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}
/* Split the (header-compressed) datagram in @skb into a FRAG1 fragment
 * followed by FRAGN fragments and transmit each one.
 *
 * @dgram_size:   length of the original, uncompressed IPv6 datagram
 * @dgram_offset: offset within the uncompressed datagram at which payload
 *                after the compressed header begins (computed by
 *                lowpan_header())
 *
 * Consumes @skb on success (NET_XMIT_SUCCESS), frees it and returns the
 * transmit error otherwise.
 */
static int
lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
		       const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
		       u16 dgram_offset)
{
	__be16 frag_tag;
	u8 frag_hdr[5];
	int frag_cap, frag_len, payload_cap, rc;
	int skb_unprocessed, skb_offset;

	/* one datagram tag shared by all fragments of this datagram */
	frag_tag = htons(lowpan_dev_info(ldev)->fragment_tag);
	lowpan_dev_info(ldev)->fragment_tag++;

	/* FRAG1 header: dispatch + high 3 bits of the 11-bit datagram size,
	 * then the low size byte, then the 2-byte tag
	 */
	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	frag_hdr[1] = dgram_size & 0xff;
	memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));

	payload_cap = ieee802154_max_payload(wpan_hdr);

	/* fragment payloads must be multiples of 8 octets */
	frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
			      skb_network_header_len(skb), 8);

	skb_offset = skb_network_header_len(skb);
	skb_unprocessed = skb->len - skb->mac_len - skb_offset;

	/* FRAG1 carries the compressed header plus the first payload chunk */
	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
				  LOWPAN_FRAG1_HEAD_SIZE, 0,
				  frag_len + skb_network_header_len(skb),
				  true);
	if (rc) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, ntohs(frag_tag));
		goto err;
	}

	/* flip the dispatch to FRAGN; size and tag bytes stay as-is */
	frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
	frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;

	frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);

	do {
		dgram_offset += frag_len;
		skb_offset += frag_len;
		skb_unprocessed -= frag_len;
		frag_len = min(frag_cap, skb_unprocessed);

		/* FRAGN's 5th byte is the datagram offset in 8-octet units */
		frag_hdr[4] = dgram_offset >> 3;

		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
					  LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
					  frag_len, false);
		if (rc) {
			pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
				 __func__, ntohs(frag_tag), skb_offset);
			goto err;
		}
	} while (skb_unprocessed > frag_cap);

	ldev->stats.tx_packets++;
	ldev->stats.tx_bytes += dgram_size;
	consume_skb(skb);
	return NET_XMIT_SUCCESS;

err:
	kfree_skb(skb);
	return rc;
}
/* Compress @skb's IPv6 header and prepend the 802.15.4 MAC header.
 *
 * On return, *dgram_size holds the uncompressed datagram length
 * (skb->len before compression) and *dgram_offset the position in the
 * uncompressed datagram where payload after the compressed header starts —
 * both are consumed by lowpan_xmit_fragmented() if fragmentation is needed.
 *
 * Returns the result of wpan_dev_hard_header() (header length or
 * negative errno).
 */
static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
			 u16 *dgram_size, u16 *dgram_offset)
{
	struct wpan_dev *wpan_dev = lowpan_dev_info(ldev)->wdev->ieee802154_ptr;
	struct ieee802154_addr sa, da;
	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
	struct lowpan_addr_info info;
	void *daddr, *saddr;

	/* copy out the headroom-stashed addresses before compression
	 * rewrites skb->data
	 */
	memcpy(&info, lowpan_skb_priv(skb), sizeof(info));

	/* TODO: Currently we only support extended_addr */
	daddr = &info.daddr.u.extended_addr;
	saddr = &info.saddr.u.extended_addr;

	*dgram_size = skb->len;
	lowpan_header_compress(skb, ldev, daddr, saddr);
	/* dgram_offset = (saved bytes after compression) + lowpan header len */
	*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);

	cb->type = IEEE802154_FC_TYPE_DATA;

	/* prepare wpan address data */
	sa.mode = IEEE802154_ADDR_LONG;
	sa.pan_id = wpan_dev->pan_id;
	sa.extended_addr = ieee802154_devaddr_from_raw(saddr);

	/* intra-PAN communications */
	da.pan_id = sa.pan_id;

	/* if the destination address is the broadcast address, use the
	 * corresponding short address
	 */
	if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
		da.mode = IEEE802154_ADDR_SHORT;
		da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
		/* no acks for broadcast frames */
		cb->ackreq = false;
	} else {
		da.mode = IEEE802154_ADDR_LONG;
		da.extended_addr = ieee802154_devaddr_from_raw(daddr);
		cb->ackreq = wpan_dev->ackreq;
	}

	return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa,
				    0);
}
/* Transmit entry point of the 6LoWPAN virtual device: make the skb
 * private, compress + build headers, then either hand the frame straight
 * to the wpan device or fragment it when it exceeds the 802.15.4 payload.
 *
 * Every path either queues the skb (ownership transferred) or frees it
 * and reports NET_XMIT_DROP.
 */
netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
{
	struct ieee802154_hdr wpan_hdr;
	int max_single, ret;
	u16 dgram_size, dgram_offset;

	pr_debug("package xmit\n");

	WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
		     skb_tailroom(skb) < ldev->needed_tailroom)) {
		struct sk_buff *nskb;

		/* not enough room for our headers: copy with extra room */
		nskb = skb_copy_expand(skb, ldev->needed_headroom,
				       ldev->needed_tailroom, GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			kfree_skb(skb);
			return NET_XMIT_DROP;
		}
	} else {
		/* room is fine; just make sure the data isn't shared */
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;
	}

	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
	if (ret < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	max_single = ieee802154_max_payload(&wpan_hdr);

	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
		/* fits in one frame: send directly via the wpan device */
		skb->dev = lowpan_dev_info(ldev)->wdev;
		ldev->stats.tx_packets++;
		ldev->stats.tx_bytes += dgram_size;
		return dev_queue_xmit(skb);
	} else {
		netdev_tx_t rc;

		pr_debug("frame is too big, fragmentation is needed\n");
		rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
					    dgram_offset);

		return rc < 0 ? NET_XMIT_DROP : rc;
	}
}