/* rtllib_tx.c */

  1. /******************************************************************************
  2. Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
  3. This program is free software; you can redistribute it and/or modify it
  4. under the terms of version 2 of the GNU General Public License as
  5. published by the Free Software Foundation.
  6. This program is distributed in the hope that it will be useful, but WITHOUT
  7. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  8. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  9. more details.
  10. The full GNU General Public License is included in this distribution in the
  11. file called LICENSE.
  12. Contact Information:
  13. James P. Ketrenos <ipw2100-admin@linux.intel.com>
  14. Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  15. ******************************************************************************
  16. Few modifications for Realtek's Wi-Fi drivers by
  17. Andrea Merello <andrea.merello@gmail.com>
  18. A special thanks goes to Realtek for their support !
  19. ******************************************************************************/
  20. #include <linux/compiler.h>
  21. #include <linux/errno.h>
  22. #include <linux/if_arp.h>
  23. #include <linux/in6.h>
  24. #include <linux/in.h>
  25. #include <linux/ip.h>
  26. #include <linux/kernel.h>
  27. #include <linux/module.h>
  28. #include <linux/netdevice.h>
  29. #include <linux/pci.h>
  30. #include <linux/proc_fs.h>
  31. #include <linux/skbuff.h>
  32. #include <linux/slab.h>
  33. #include <linux/tcp.h>
  34. #include <linux/types.h>
  35. #include <linux/wireless.h>
  36. #include <linux/etherdevice.h>
  37. #include <linux/uaccess.h>
  38. #include <linux/if_vlan.h>
  39. #include "rtllib.h"
  40. /* 802.11 Data Frame
  41. *
  42. *
  43. * 802.11 frame_control for data frames - 2 bytes
  44. * ,--------------------------------------------------------------------.
  45. * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
  46. * |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
  47. * val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
  48. * |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
  49. * desc | ver | type | ^-subtype-^ |to |from|more|retry| pwr |more |wep |
  50. * | | | x=0 data |DS | DS |frag| | mgm |data | |
  51. * | | | x=1 data+ack | | | | | | | |
  52. * '--------------------------------------------------------------------'
  53. * /\
  54. * |
  55. * 802.11 Data Frame |
  56. * ,--------- 'ctrl' expands to >---'
  57. * |
  58. * ,--'---,-------------------------------------------------------------.
  59. * Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
  60. * |------|------|---------|---------|---------|------|---------|------|
  61. * Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
  62. * | | tion | (BSSID) | | | ence | data | |
  63. * `--------------------------------------------------| |------'
  64. * Total: 28 non-data bytes `----.----'
  65. * |
  66. * .- 'Frame data' expands to <---------------------------'
  67. * |
  68. * V
  69. * ,---------------------------------------------------.
  70. * Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
  71. * |------|------|---------|----------|------|---------|
  72. * Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
  73. * | DSAP | SSAP | | | | Packet |
  74. * | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
  75. * `-----------------------------------------| |
  76. * Total: 8 non-data bytes `----.----'
  77. * |
  78. * .- 'IP Packet' expands, if WEP enabled, to <--'
  79. * |
  80. * V
  81. * ,-----------------------.
  82. * Bytes | 4 | 0-2296 | 4 |
  83. * |-----|-----------|-----|
  84. * Desc. | IV | Encrypted | ICV |
  85. * | | IP Packet | |
  86. * `-----------------------'
  87. * Total: 8 non-data bytes
  88. *
  89. *
  90. * 802.3 Ethernet Data Frame
  91. *
  92. * ,-----------------------------------------.
  93. * Bytes | 6 | 6 | 2 | Variable | 4 |
  94. * |-------|-------|------|-----------|------|
  95. * Desc. | Dest. | Source| Type | IP Packet | fcs |
  96. * | MAC | MAC | | | |
  97. * `-----------------------------------------'
  98. * Total: 18 non-data bytes
  99. *
  100. * In the event that fragmentation is required, the incoming payload is split
  101. * into N parts of size ieee->fts. The first fragment contains the SNAP header
  102. * and the remaining packets are just data.
  103. *
  104. * If encryption is enabled, each fragment payload size is reduced by enough
  105. * space to add the prefix and postfix (IV and ICV totalling 8 bytes in
  106. * the case of WEP) So if you have 1500 bytes of payload with ieee->fts set to
  107. * 500 without encryption it will take 3 frames. With WEP it will take 4 frames
  108. * as the payload of each frame is reduced to 492 bytes.
  109. *
  110. * SKB visualization
  111. *
  112. * ,- skb->data
  113. * |
  114. * | ETHERNET HEADER ,-<-- PAYLOAD
  115. * | | 14 bytes from skb->data
  116. * | 2 bytes for Type --> ,T. | (sizeof ethhdr)
  117. * | | | |
  118. * |,-Dest.--. ,--Src.---. | | |
  119. * | 6 bytes| | 6 bytes | | | |
  120. * v | | | | | |
  121. * 0 | v 1 | v | v 2
  122. * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
  123. * ^ | ^ | ^ |
  124. * | | | | | |
  125. * | | | | `T' <---- 2 bytes for Type
  126. * | | | |
  127. * | | '---SNAP--' <-------- 6 bytes for SNAP
  128. * | |
  129. * `-IV--' <-------------------- 4 bytes for IV (WEP)
  130. *
  131. * SNAP HEADER
  132. *
  133. */
  134. static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
  135. static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
  136. static int rtllib_put_snap(u8 *data, u16 h_proto)
  137. {
  138. struct rtllib_snap_hdr *snap;
  139. u8 *oui;
  140. snap = (struct rtllib_snap_hdr *)data;
  141. snap->dsap = 0xaa;
  142. snap->ssap = 0xaa;
  143. snap->ctrl = 0x03;
  144. if (h_proto == 0x8137 || h_proto == 0x80f3)
  145. oui = P802_1H_OUI;
  146. else
  147. oui = RFC1042_OUI;
  148. snap->oui[0] = oui[0];
  149. snap->oui[1] = oui[1];
  150. snap->oui[2] = oui[2];
  151. *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
  152. return SNAP_SIZE + sizeof(u16);
  153. }
  154. int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
  155. int hdr_len)
  156. {
  157. struct lib80211_crypt_data *crypt = NULL;
  158. int res;
  159. crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
  160. if (!(crypt && crypt->ops)) {
  161. netdev_info(ieee->dev, "=========>%s(), crypt is null\n",
  162. __func__);
  163. return -1;
  164. }
  165. /* To encrypt, frame format is:
  166. * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
  167. */
  168. /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
  169. * call both MSDU and MPDU encryption functions from here.
  170. */
  171. atomic_inc(&crypt->refcnt);
  172. res = 0;
  173. if (crypt->ops->encrypt_msdu)
  174. res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
  175. if (res == 0 && crypt->ops->encrypt_mpdu)
  176. res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
  177. atomic_dec(&crypt->refcnt);
  178. if (res < 0) {
  179. netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
  180. ieee->dev->name, frag->len);
  181. return -1;
  182. }
  183. return 0;
  184. }
  185. void rtllib_txb_free(struct rtllib_txb *txb)
  186. {
  187. if (unlikely(!txb))
  188. return;
  189. kfree(txb);
  190. }
  191. static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
  192. gfp_t gfp_mask)
  193. {
  194. struct rtllib_txb *txb;
  195. int i;
  196. txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
  197. gfp_mask);
  198. if (!txb)
  199. return NULL;
  200. memset(txb, 0, sizeof(struct rtllib_txb));
  201. txb->nr_frags = nr_frags;
  202. txb->frag_size = cpu_to_le16(txb_size);
  203. for (i = 0; i < nr_frags; i++) {
  204. txb->fragments[i] = dev_alloc_skb(txb_size);
  205. if (unlikely(!txb->fragments[i])) {
  206. i--;
  207. break;
  208. }
  209. memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
  210. }
  211. if (unlikely(i != nr_frags)) {
  212. while (i >= 0)
  213. dev_kfree_skb_any(txb->fragments[i--]);
  214. kfree(txb);
  215. return NULL;
  216. }
  217. return txb;
  218. }
  219. static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
  220. {
  221. struct ethhdr *eth;
  222. struct iphdr *ip;
  223. eth = (struct ethhdr *)skb->data;
  224. if (eth->h_proto != htons(ETH_P_IP))
  225. return 0;
  226. #ifdef VERBOSE_DEBUG
  227. print_hex_dump_bytes("rtllib_classify(): ", DUMP_PREFIX_NONE, skb->data,
  228. skb->len);
  229. #endif
  230. ip = ip_hdr(skb);
  231. switch (ip->tos & 0xfc) {
  232. case 0x20:
  233. return 2;
  234. case 0x40:
  235. return 1;
  236. case 0x60:
  237. return 3;
  238. case 0x80:
  239. return 4;
  240. case 0xa0:
  241. return 5;
  242. case 0xc0:
  243. return 6;
  244. case 0xe0:
  245. return 7;
  246. default:
  247. return 0;
  248. }
  249. }
/*
 * Decide whether this frame may be transmitted inside an A-MPDU
 * aggregate and fill the aggregation fields of @tcb_desc.  When no
 * block-ack agreement exists yet for the traffic stream, this kicks
 * off the ADDBA handshake instead of enabling aggregation.
 */
static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct sk_buff *skb,
				    struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct tx_ts_record *pTxTs = NULL;
	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;

	/* Aggregation is only meaningful for unicast QoS data frames on
	 * an HT link that is not scanning.
	 */
	if (rtllib_act_scanning(ieee, false))
		return;
	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;
	if (is_multicast_ether_addr(hdr->addr1))
		return;
	/* Skip DHCP traffic and the first frames right after (re)assoc. */
	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
		return;
	/* Peer quirk: some APs misbehave with TX aggregation. */
	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
		return;
	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
		return;
	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
			   skb->priority, TX_DIR, true)) {
			netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
			return;
		}
		if (pTxTs->TxAdmittedBARecord.bValid == false) {
			/* No admitted block-ack yet: start ADDBA unless
			 * keying is still pending, the frame is DHCP, or
			 * ADDBA is disabled for this TS.
			 */
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
			    KEY_TYPE_NA)) {
				;
			} else if (tcb_desc->bdhcp == 1) {
				;
			} else if (!pTxTs->bDisable_AddBa) {
				TsStartAddBaProcess(ieee, pTxTs);
			}
			goto FORCED_AGG_SETTING;
		} else if (pTxTs->bUsingBa == false) {
			/* BA admitted but not yet in use: switch it on once
			 * the start sequence number has been passed.
			 */
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
				    (pTxTs->TxCurSeq + 1) % 4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}
		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	/* A forced mode (e.g. set for debugging) overrides the negotiated
	 * aggregation settings above.
	 */
	switch (pHTInfo->ForcedAMPDUMode) {
	case HT_AGG_AUTO:
		break;
	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;
	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
}
  316. static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee,
  317. struct cb_desc *tcb_desc)
  318. {
  319. tcb_desc->bUseShortPreamble = false;
  320. if (tcb_desc->data_rate == 2)
  321. return;
  322. else if (ieee->current_network.capability &
  323. WLAN_CAPABILITY_SHORT_PREAMBLE)
  324. tcb_desc->bUseShortPreamble = true;
  325. }
  326. static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
  327. struct cb_desc *tcb_desc)
  328. {
  329. struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
  330. tcb_desc->bUseShortGI = false;
  331. if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
  332. return;
  333. if (pHTInfo->bForcedShortGI) {
  334. tcb_desc->bUseShortGI = true;
  335. return;
  336. }
  337. if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
  338. tcb_desc->bUseShortGI = true;
  339. else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
  340. tcb_desc->bUseShortGI = true;
  341. }
  342. static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
  343. struct cb_desc *tcb_desc)
  344. {
  345. struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
  346. tcb_desc->bPacketBW = false;
  347. if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
  348. return;
  349. if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
  350. return;
  351. if ((tcb_desc->data_rate & 0x80) == 0)
  352. return;
  353. if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
  354. !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
  355. tcb_desc->bPacketBW = true;
  356. }
  357. static void rtllib_query_protectionmode(struct rtllib_device *ieee,
  358. struct cb_desc *tcb_desc,
  359. struct sk_buff *skb)
  360. {
  361. struct rt_hi_throughput *pHTInfo;
  362. tcb_desc->bRTSSTBC = false;
  363. tcb_desc->bRTSUseShortGI = false;
  364. tcb_desc->bCTSEnable = false;
  365. tcb_desc->RTSSC = 0;
  366. tcb_desc->bRTSBW = false;
  367. if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
  368. return;
  369. if (is_broadcast_ether_addr(skb->data+16))
  370. return;
  371. if (ieee->mode < IEEE_N_24G) {
  372. if (skb->len > ieee->rts) {
  373. tcb_desc->bRTSEnable = true;
  374. tcb_desc->rts_rate = MGN_24M;
  375. } else if (ieee->current_network.buseprotection) {
  376. tcb_desc->bRTSEnable = true;
  377. tcb_desc->bCTSEnable = true;
  378. tcb_desc->rts_rate = MGN_24M;
  379. }
  380. return;
  381. }
  382. pHTInfo = ieee->pHTInfo;
  383. while (true) {
  384. if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
  385. tcb_desc->bCTSEnable = true;
  386. tcb_desc->rts_rate = MGN_24M;
  387. tcb_desc->bRTSEnable = true;
  388. break;
  389. } else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
  390. HT_IOT_ACT_PURE_N_MODE)) {
  391. tcb_desc->bRTSEnable = true;
  392. tcb_desc->rts_rate = MGN_24M;
  393. break;
  394. }
  395. if (ieee->current_network.buseprotection) {
  396. tcb_desc->bRTSEnable = true;
  397. tcb_desc->bCTSEnable = true;
  398. tcb_desc->rts_rate = MGN_24M;
  399. break;
  400. }
  401. if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
  402. u8 HTOpMode = pHTInfo->CurrentOpMode;
  403. if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
  404. HTOpMode == 3)) ||
  405. (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
  406. tcb_desc->rts_rate = MGN_24M;
  407. tcb_desc->bRTSEnable = true;
  408. break;
  409. }
  410. }
  411. if (skb->len > ieee->rts) {
  412. tcb_desc->rts_rate = MGN_24M;
  413. tcb_desc->bRTSEnable = true;
  414. break;
  415. }
  416. if (tcb_desc->bAMPDUEnable) {
  417. tcb_desc->rts_rate = MGN_24M;
  418. tcb_desc->bRTSEnable = false;
  419. break;
  420. }
  421. goto NO_PROTECTION;
  422. }
  423. if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
  424. tcb_desc->bUseShortPreamble = true;
  425. if (ieee->iw_mode == IW_MODE_MASTER)
  426. goto NO_PROTECTION;
  427. return;
  428. NO_PROTECTION:
  429. tcb_desc->bRTSEnable = false;
  430. tcb_desc->bCTSEnable = false;
  431. tcb_desc->rts_rate = 0;
  432. tcb_desc->RTSSC = 0;
  433. tcb_desc->bRTSBW = false;
  434. }
  435. static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
  436. struct cb_desc *tcb_desc)
  437. {
  438. if (ieee->bTxDisableRateFallBack)
  439. tcb_desc->bTxDisableRateFallBack = true;
  440. if (ieee->bTxUseDriverAssingedRate)
  441. tcb_desc->bTxUseDriverAssingedRate = true;
  442. if (!tcb_desc->bTxDisableRateFallBack ||
  443. !tcb_desc->bTxUseDriverAssingedRate) {
  444. if (ieee->iw_mode == IW_MODE_INFRA ||
  445. ieee->iw_mode == IW_MODE_ADHOC)
  446. tcb_desc->RATRIndex = 0;
  447. }
  448. }
  449. static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
  450. u8 *dst)
  451. {
  452. u16 seqnum = 0;
  453. if (is_multicast_ether_addr(dst))
  454. return 0;
  455. if (IsQoSDataFrame(skb->data)) {
  456. struct tx_ts_record *pTS = NULL;
  457. if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
  458. skb->priority, TX_DIR, true))
  459. return 0;
  460. seqnum = pTS->TxCurSeq;
  461. pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
  462. return seqnum;
  463. }
  464. return 0;
  465. }
  466. static int wme_downgrade_ac(struct sk_buff *skb)
  467. {
  468. switch (skb->priority) {
  469. case 6:
  470. case 7:
  471. skb->priority = 5; /* VO -> VI */
  472. return 0;
  473. case 4:
  474. case 5:
  475. skb->priority = 3; /* VI -> BE */
  476. return 0;
  477. case 0:
  478. case 3:
  479. skb->priority = 1; /* BE -> BK */
  480. return 0;
  481. default:
  482. return -1;
  483. }
  484. }
  485. static u8 rtllib_current_rate(struct rtllib_device *ieee)
  486. {
  487. if (ieee->mode & IEEE_MODE_MASK)
  488. return ieee->rate;
  489. if (ieee->HTCurrentOperaRate)
  490. return ieee->HTCurrentOperaRate;
  491. else
  492. return ieee->rate & 0x7F;
  493. }
/*
 * Core transmit path: convert an outgoing Ethernet frame into a TXB
 * (one or more 802.11 data fragments, each carrying a SNAP header on
 * the first fragment and headroom for encryption), fill the
 * driver-facing TX descriptor, and hand the TXB to the softmac queue
 * or the driver's hard_start_xmit handler.
 *
 * Runs with ieee->lock held while building; the input skb is always
 * consumed.  Returns 0 on success, 1 on TXB allocation failure (queue
 * stopped, tx_errors incremented).
 */
static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	int qos_actived = ieee->current_network.qos_data.active;
	u8 dest[ETH_ALEN];
	u8 src[ETH_ALEN];
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it...
	 */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	     IEEE_SOFTMAC_TX_QUEUE)) ||
	    ((!ieee->softmac_data_hard_start_xmit &&
	     (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		netdev_warn(ieee->dev, "No xmit handler.\n");
		goto success;
	}

	if (likely(ieee->raw_tx == 0)) {
		/* Must at least hold a SNAP header plus ethertype. */
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",
				    skb->len);
			goto success;
		}
		/* Save source and destination addresses */
		ether_addr_copy(dest, skb->data);
		ether_addr_copy(src, skb->data + ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		/* Monitor mode: pass the frame through unmodified as a
		 * single unencrypted fragment.
		 */
		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				netdev_warn(ieee->dev,
					    "Could not allocate TXB\n");
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = cpu_to_le16(skb->len);
			memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
			       skb->len);
			goto success;
		}

		/* Detect DHCP and ARP so they can be sent robustly later
		 * and delay power-save entry.  NOTE(review): 282 is
		 * presumably a minimum DHCP packet size -- confirm.
		 */
		if (skb->len > 282) {
			if (ether_type == ETH_P_IP) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data + 14);

				if (ip->protocol == IPPROTO_UDP) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					/* UDP ports 67/68 in either
					 * direction => DHCP exchange.
					 */
					if (((((u8 *)udp)[1] == 68) &&
					     (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) &&
					     (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ether_type == ETH_P_ARP) {
				netdev_info(ieee->dev,
					    "=================>DHCP Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					 ieee->current_network.tim.tim_count;
			}
		}

		/* Map IP TOS to an 802.11e user priority. */
		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		/* EAPOL frames bypass encryption while 802.1X keying runs. */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
				sizeof(u16));
			netdev_dbg(ieee->dev,
				   "TX: IEEE 802.11 EAPOL frame: %s\n",
				   eap_get_type(eap->type));
		}

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			 * Addr3 = DA
			 */
			ether_addr_copy(header.addr1,
					ieee->current_network.bssid);
			ether_addr_copy(header.addr2, src);
			if (IsAmsdu)
				ether_addr_copy(header.addr3,
						ieee->current_network.bssid);
			else
				ether_addr_copy(header.addr3, dest);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID
			 */
			ether_addr_copy(header.addr1, dest);
			ether_addr_copy(header.addr2, src);
			ether_addr_copy(header.addr3,
					ieee->current_network.bssid);
		}

		bIsMulticast = is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented)
		 */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_actived) {
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* in case we are a client verify acm is not set for
			 * this ac
			 */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				netdev_info(ieee->dev, "skb->priority = %x\n",
					    skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				netdev_info(ieee->dev, "converted skb->priority = %x\n",
					    skb->priority);
			}

			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);

		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment. Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space.
		 */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encrypting
		 * pre/postfix
		 */
		if (encrypt) {
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
				crypt->ops->extra_mpdu_postfix_len +
				crypt->ops->extra_msdu_prefix_len +
				crypt->ops->extra_msdu_postfix_len;
		}
		/* Number of fragments is the total bytes_per_frag /
		 * payload_per_fragment
		 */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.)
		 */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = cpu_to_le16(bytes);

		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				    MAX_DEV_ADDR_SIZE);
			if (qos_actived) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct rtllib_hdr_3addrqos *)
				   skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control
			 */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;
			} else {
				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;
			}
			/* seq_ctl: sequence number in the high 12 bits,
			 * fragment number in the low 4.
			 */
			if ((qos_actived) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
									 header.addr1));
				frag_hdr->seq_ctl =
					 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
			} else {
				frag_hdr->seq_ctl =
					 cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE +
					sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload
			 */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* Advance the per-AC (QoS unicast) or global sequence
		 * counter, wrapping at 0xFFF.
		 */
		if ((qos_actived) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		/* Raw TX: the caller supplies a complete 802.11 frame. */
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",
				    skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = cpu_to_le16(skb->len);
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
		       skb->len);
	}

 success:
	/* Fill the driver-facing TX descriptor (rate, protection,
	 * aggregation) stored in the first fragment's control block.
	 */
	if (txb) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			/* EAPOL: fixed robust rate, no rate fallback. */
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = rtllib_current_rate(ieee);

			if (bdhcp) {
				/* DHCP: force a low, robust rate. */
				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;
				}

				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += le16_to_cpu(txb->payload_size);
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;
}
/*
 * net_device transmit entry point: start from a clean skb control
 * block, then run the shared transmit path.
 */
int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
EXPORT_SYMBOL(rtllib_xmit);