/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
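/* Look up the rx skb previously posted at the given DMA address. The skb
 * hash table is only used in full rx reorder (in-order indication) mode and
 * is modified under rx_ring.lock, so callers are expected to hold that lock.
 * Returns NULL (and warns once) if no matching buffer is found.
 */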
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}
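
/* Unmap and free every rx buffer currently owned by the host, both in the
 * paddr hash table (full rx reorder mode) and in the netbufs ring, then
 * reset the fill count. Called with rx_ring.lock held.
 */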
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
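
/* Post up to @num fresh rx buffers: allocate skbs, align them to the rx
 * descriptor boundary, DMA-map them and publish their physical addresses in
 * the paddrs ring before advancing the firmware-visible alloc index. Returns
 * 0 on success or -ENOMEM once allocation/mapping fails; buffers posted so
 * far remain on the ring.
 */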
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure the ring is never more than half
	 * full. This guarantees there'll be no replenishment overruns
	 * possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 (u32)paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the FW
	 * will not report RX until it is refilled with enough buffers. This
	 * automatically balances load wrt to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}
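
/* Fill the rx ring all the way up to its configured fill level. If the fill
 * fails, every buffer posted so far is freed again and the error is
 * propagated to the caller.
 */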
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	if (ret)
		ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}
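
/* Pop the next buffer from the rx ring at the software read index, unmap it
 * and hand ownership to the caller. Must be called with rx_ring.lock held.
 */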
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ring.paddrs_ring[idx] = 0;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;

			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the actual
			 * MSDUs inside this MPDU. Mark the FW descriptors so
			 * that they will still be delivered to the upper
			 * stack if there is no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */
	return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u32 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}
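
/* For full rx reorder (in-order indication) mode: walk the MSDU descriptor
 * list from the firmware event, pop each referenced buffer by its physical
 * address and queue it on @list. For non-offloaded frames the HW rx
 * descriptor is stripped off and the reported MSDU length applied. Must be
 * called with rx_ring.lock held.
 */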
static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
					struct htt_rx_in_ord_ind *ev,
					struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}
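
/* Allocate the host side of the HTT rx ring: the netbuf pointer array, the
 * DMA-coherent ring of buffer physical addresses, the shared alloc index,
 * the refill retry timer and the rx completion tasklets. The ring size must
 * be a power of two so the index can wrap with a simple mask.
 */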
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);

err_netbuf:
	return -ENOMEM;
}
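
/* Per-cipher length (in bytes) of the security header (IV/PN) that precedes
 * the payload in a protected frame. Used when stripping or re-inserting the
 * crypto parameters during undecap.
 */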
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nss = ((info2 >> 10) & 0x07) + 1;
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));
			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}
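
/* Hand a single, fully prepared MSDU to mac80211: copy the accumulated rx
 * status into the skb's control buffer, emit the debug/trace output and call
 * ieee80211_rx().
 */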
static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_tail_len(ar, enctype));
	} else {
		/* MIC */
		if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			skb_trim(msdu, msdu->len - 8);

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED &&
		    enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_tail_len(ar, enctype));
	}

	/* MMIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - 8);

	/* Head */
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	memmove((void *)msdu->data + crypto_len,
		(void *)msdu->data, hdr_len);
	skb_pull(msdu, crypto_len);
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					   struct sk_buff *msdu,
					   enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}
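
/* Translate the hardware checksum attention bits into the skb ip_summed
 * state: only TCP/UDP over IPv4/IPv6 frames whose IP and L4 checksums both
 * passed are reported as CHECKSUM_UNNECESSARY; everything else falls back to
 * CHECKSUM_NONE so the stack re-verifies it.
 */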
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
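
/* Per-MPDU rx processing: determine the encryption type and error state from
 * the first and last MSDU rx descriptors, update the per-MPDU rx_status
 * flags accordingly, then checksum-offload and undecap every MSDU in the
 * A-MSDU. If the hardware decrypted the frame, the protected bit is cleared
 * from the reconstructed 802.11 header (unless the crypto header is being
 * kept for mac80211 to parse).
 */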
  1137. static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
  1138. struct sk_buff_head *amsdu,
  1139. struct ieee80211_rx_status *status,
  1140. bool fill_crypt_header)
  1141. {
  1142. struct sk_buff *first;
  1143. struct sk_buff *last;
  1144. struct sk_buff *msdu;
  1145. struct htt_rx_desc *rxd;
  1146. struct ieee80211_hdr *hdr;
  1147. enum htt_rx_mpdu_encrypt_type enctype;
  1148. u8 first_hdr[64];
  1149. u8 *qos;
  1150. bool has_fcs_err;
  1151. bool has_crypto_err;
  1152. bool has_tkip_err;
  1153. bool has_peer_idx_invalid;
  1154. bool is_decrypted;
  1155. u32 attention;
  1156. if (skb_queue_empty(amsdu))
  1157. return;
  1158. first = skb_peek(amsdu);
  1159. rxd = (void *)first->data - sizeof(*rxd);
  1160. enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
  1161. RX_MPDU_START_INFO0_ENCRYPT_TYPE);
  1162. /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
  1163. * decapped header. It'll be used for undecapping of each MSDU.
  1164. */
  1165. hdr = (void *)rxd->rx_hdr_status;
  1166. memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
  1167. /* Each A-MSDU subframe will use the original header as the base and be
  1168. * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
  1169. */
  1170. hdr = (void *)first_hdr;
  1171. if (ieee80211_is_data_qos(hdr->frame_control)) {
  1172. qos = ieee80211_get_qos_ctl(hdr);
  1173. qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
  1174. }
  1175. /* Some attention flags are valid only in the last MSDU. */
  1176. last = skb_peek_tail(amsdu);
  1177. rxd = (void *)last->data - sizeof(*rxd);
  1178. attention = __le32_to_cpu(rxd->attention.flags);
  1179. has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
  1180. has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
  1181. has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
  1182. has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
  1183. /* Note: If hardware captures an encrypted frame that it can't decrypt,
  1184. * e.g. due to fcs error, missing peer or invalid key data it will
  1185. * report the frame as raw.
  1186. */
  1187. is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
  1188. !has_fcs_err &&
  1189. !has_crypto_err &&
  1190. !has_peer_idx_invalid);
  1191. /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
  1192. status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
  1193. RX_FLAG_MMIC_ERROR |
  1194. RX_FLAG_DECRYPTED |
  1195. RX_FLAG_IV_STRIPPED |
  1196. RX_FLAG_MMIC_STRIPPED);
  1197. if (has_fcs_err)
  1198. status->flag |= RX_FLAG_FAILED_FCS_CRC;
  1199. if (has_tkip_err)
  1200. status->flag |= RX_FLAG_MMIC_ERROR;
  1201. if (is_decrypted) {
  1202. status->flag |= RX_FLAG_DECRYPTED |
  1203. RX_FLAG_MMIC_STRIPPED;
  1204. if (fill_crypt_header)
  1205. status->flag |= RX_FLAG_MIC_STRIPPED |
  1206. RX_FLAG_ICV_STRIPPED;
  1207. else
  1208. status->flag |= RX_FLAG_IV_STRIPPED;
  1209. }
  1210. skb_queue_walk(amsdu, msdu) {
  1211. ath10k_htt_rx_h_csum_offload(msdu);
  1212. ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
  1213. is_decrypted);
  1214. /* Undecapping involves copying the original 802.11 header back
  1215. * to sk_buff. If frame is protected and hardware has decrypted
  1216. * it then remove the protected bit.
  1217. */
  1218. if (!is_decrypted)
  1219. continue;
  1220. if (fill_crypt_header)
  1221. continue;
  1222. hdr = (void *)msdu->data;
  1223. hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  1224. }
  1225. }
  1226. static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
  1227. struct sk_buff_head *amsdu,
  1228. struct ieee80211_rx_status *status)
  1229. {
  1230. struct sk_buff *msdu;
  1231. struct sk_buff *first_subframe;
  1232. first_subframe = skb_peek(amsdu);
  1233. while ((msdu = __skb_dequeue(amsdu))) {
  1234. /* Setup per-MSDU flags */
  1235. if (skb_queue_empty(amsdu))
  1236. status->flag &= ~RX_FLAG_AMSDU_MORE;
  1237. else
  1238. status->flag |= RX_FLAG_AMSDU_MORE;
  1239. if (msdu == first_subframe) {
  1240. first_subframe = NULL;
  1241. status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
  1242. } else {
  1243. status->flag |= RX_FLAG_ALLOW_SAME_PN;
  1244. }
  1245. ath10k_process_rx(ar, status, msdu);
  1246. }
  1247. }
  1248. static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
  1249. {
  1250. struct sk_buff *skb, *first;
  1251. int space;
  1252. int total_len = 0;
  1253. /* TODO: Might could optimize this by using
  1254. * skb_try_coalesce or similar method to
  1255. * decrease copying, or maybe get mac80211 to
  1256. * provide a way to just receive a list of
  1257. * skb?
  1258. */
  1259. first = __skb_dequeue(amsdu);
  1260. /* Allocate total length all at once. */
  1261. skb_queue_walk(amsdu, skb)
  1262. total_len += skb->len;
  1263. space = total_len - skb_tailroom(first);
  1264. if ((space > 0) &&
  1265. (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
  1266. /* TODO: bump some rx-oom error stat */
  1267. /* put it back together so we can free the
  1268. * whole list at once.
  1269. */
  1270. __skb_queue_head(amsdu, first);
  1271. return -1;
  1272. }
  1273. /* Walk list again, copying contents into
  1274. * msdu_head
  1275. */
  1276. while ((skb = __skb_dequeue(amsdu))) {
  1277. skb_copy_from_linear_data(skb, skb_put(first, skb->len),
  1278. skb->len);
  1279. dev_kfree_skb_any(skb);
  1280. }
  1281. __skb_queue_head(amsdu, first);
  1282. return 0;
  1283. }
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
                                    struct sk_buff_head *amsdu,
                                    bool chained)
{
        struct sk_buff *first;
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format decap;

        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);
        decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);

        if (!chained)
                return;

        /* FIXME: Current unchaining logic can only handle simple case of raw
         * msdu chaining. If decapping is other than raw the chaining may be
         * more complex and this isn't handled by the current code. Don't even
         * try re-constructing such frames - it'll be pretty much garbage.
         */
        if (decap != RX_MSDU_DECAP_RAW ||
            skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
                __skb_queue_purge(amsdu);
                return;
        }

        ath10k_unchain_msdu(amsdu);
}

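/* Decide whether an A-MSDU popped from the rx ring should be passed on.
 * Frames are rejected when no channel is known, when they are management
 * frames without an FCS error (those are delivered via WMI instead), or
 * while CAC is running.
 */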
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
                                        struct sk_buff_head *amsdu,
                                        struct ieee80211_rx_status *rx_status)
{
        struct sk_buff *msdu;
        struct htt_rx_desc *rxd;
        bool is_mgmt;
        bool has_fcs_err;

        msdu = skb_peek(amsdu);
        rxd = (void *)msdu->data - sizeof(*rxd);

        /* FIXME: It might be a good idea to do some fuzzy-testing to drop
         * invalid/dangerous frames.
         */

        if (!rx_status->freq) {
                ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
                return false;
        }

        is_mgmt = !!(rxd->attention.flags &
                     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
        has_fcs_err = !!(rxd->attention.flags &
                         __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));

        /* Management frames are handled via WMI events. The advantage of
         * that approach is that the channel is explicitly provided in WMI
         * events, whereas HTT doesn't provide channel information for
         * received frames.
         *
         * However, some firmware revisions don't report corrupted frames via
         * WMI, so don't drop them.
         */
        if (is_mgmt && !has_fcs_err) {
                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
                return false;
        }

        if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
                return false;
        }

        return true;
}

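/* Purge the whole A-MSDU if ath10k_htt_rx_amsdu_allowed() rejects it. */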
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
                                   struct sk_buff_head *amsdu,
                                   struct ieee80211_rx_status *rx_status)
{
        if (skb_queue_empty(amsdu))
                return;

        if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
                return;

        __skb_queue_purge(amsdu);
}

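/* Process an HTT_T2H_MSG_TYPE_RX_IND message: pop each advertised MPDU from
 * the rx ring and run it through the ppdu/unchain/filter/mpdu/deliver
 * stages. Called from the txrx completion tasklet with rx_ring.lock held.
 */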
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                  struct htt_rx_indication *rx)
{
        struct ath10k *ar = htt->ar;
        struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct htt_rx_indication_mpdu_range *mpdu_ranges;
        struct sk_buff_head amsdu;
        int num_mpdu_ranges;
        int fw_desc_len;
        u8 *fw_desc;
        int i, ret, mpdu_count = 0;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_confused)
                return;

        fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
        fw_desc = (u8 *)&rx->fw_desc;

        num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
                        rx, sizeof(*rx) +
                        (sizeof(struct htt_rx_indication_mpdu_range) *
                         num_mpdu_ranges));

        for (i = 0; i < num_mpdu_ranges; i++)
                mpdu_count += mpdu_ranges[i].mpdu_count;

        while (mpdu_count--) {
                __skb_queue_head_init(&amsdu);
                ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
                                              &fw_desc_len, &amsdu);
                if (ret < 0) {
                        ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
                        __skb_queue_purge(&amsdu);
                        /* FIXME: It's probably a good idea to reboot the
                         * device instead of leaving it inoperable.
                         */
                        htt->rx_confused = true;
                        break;
                }

                ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
                ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
                ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
                ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
                ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
        }

        tasklet_schedule(&htt->rx_replenish_task);
}

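/* Process an HTT_T2H_MSG_TYPE_RX_FRAG_IND message. A fragmented rx
 * indication is expected to carry exactly one MSDU; anything else is
 * dropped.
 */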
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                                       struct htt_rx_fragment_indication *frag)
{
        struct ath10k *ar = htt->ar;
        struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct sk_buff_head amsdu;
        int ret;
        u8 *fw_desc;
        int fw_desc_len;

        fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
        fw_desc = (u8 *)frag->fw_msdu_rx_desc;

        __skb_queue_head_init(&amsdu);

        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
                                      &amsdu);
        spin_unlock_bh(&htt->rx_ring.lock);

        tasklet_schedule(&htt->rx_replenish_task);

        ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

        if (ret) {
                ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
                            ret);
                __skb_queue_purge(&amsdu);
                return;
        }

        if (skb_queue_len(&amsdu) != 1) {
                ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
                __skb_queue_purge(&amsdu);
                return;
        }

        ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
        ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
        ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
        ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

        if (fw_desc_len > 0) {
                ath10k_dbg(ar, ATH10K_DBG_HTT,
                           "expecting more fragmented rx in one indication %d\n",
                           fw_desc_len);
        }
}

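/* Translate an HTT data tx completion into htt_tx_done records and release
 * the corresponding tx descriptors via ath10k_txrx_tx_unref().
 */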
static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
                                       struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;
        struct htt_tx_done tx_done = {};
        int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
        __le16 msdu_id;
        int i;

        switch (status) {
        case HTT_DATA_TX_STATUS_NO_ACK:
                tx_done.no_ack = true;
                break;
        case HTT_DATA_TX_STATUS_OK:
                tx_done.success = true;
                break;
        case HTT_DATA_TX_STATUS_DISCARD:
        case HTT_DATA_TX_STATUS_POSTPONE:
        case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
                tx_done.discard = true;
                break;
        default:
                ath10k_warn(ar, "unhandled tx completion status %d\n", status);
                tx_done.discard = true;
                break;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
                   resp->data_tx_completion.num_msdus);

        for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
                msdu_id = resp->data_tx_completion.msdus[i];
                tx_done.msdu_id = __le16_to_cpu(msdu_id);
                ath10k_txrx_tx_unref(htt, &tx_done);
        }
}

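/* The HTT RX_ADDBA and RX_DELBA handlers below map the peer_id/tid carried
 * in the event to a station and ask mac80211 to start or stop the offloaded
 * rx block-ack session.
 */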
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
        struct htt_rx_addba *ev = &resp->rx_addba;
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        u16 info0, tid, peer_id;

        info0 = __le16_to_cpu(ev->info0);
        tid = MS(info0, HTT_RX_BA_INFO0_TID);
        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx addba tid %hu peer_id %hu size %hhu\n",
                   tid, peer_id, ev->window_size);

        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer) {
                ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
                            peer_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (!arvif) {
                ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
                            peer->vdev_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
                   peer->addr, tid, ev->window_size);

        ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
        spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
        struct htt_rx_delba *ev = &resp->rx_delba;
        struct ath10k_peer *peer;
        struct ath10k_vif *arvif;
        u16 info0, tid, peer_id;

        info0 = __le16_to_cpu(ev->info0);
        tid = MS(info0, HTT_RX_BA_INFO0_TID);
        peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx delba tid %hu peer_id %hu\n",
                   tid, peer_id);

        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find_by_id(ar, peer_id);
        if (!peer) {
                ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
                            peer_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        arvif = ath10k_get_arvif(ar, peer->vdev_id);
        if (!arvif) {
                ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
                            peer->vdev_id);
                spin_unlock_bh(&ar->data_lock);
                return;
        }

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx stop rx ba session sta %pM tid %hu\n",
                   peer->addr, tid);

        ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
        spin_unlock_bh(&ar->data_lock);
}

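/* Move MSDUs from 'list' into 'amsdu' until a descriptor with the LAST_MSDU
 * bit is seen. Returns 0 on success, -ENOBUFS if 'list' is empty, -EINVAL if
 * 'amsdu' isn't empty, or -EAGAIN (restoring 'list') if the A-MSDU is not
 * yet complete.
 */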
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
                                       struct sk_buff_head *amsdu)
{
        struct sk_buff *msdu;
        struct htt_rx_desc *rxd;

        if (skb_queue_empty(list))
                return -ENOBUFS;

        if (WARN_ON(!skb_queue_empty(amsdu)))
                return -EINVAL;

        while ((msdu = __skb_dequeue(list))) {
                __skb_queue_tail(amsdu, msdu);

                rxd = (void *)msdu->data - sizeof(*rxd);
                if (rxd->msdu_end.common.info0 &
                    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
                        break;
        }

        msdu = skb_peek_tail(amsdu);
        rxd = (void *)msdu->data - sizeof(*rxd);
        if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
                skb_queue_splice_init(amsdu, list);
                return -EAGAIN;
        }

        return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
                                            struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        if (!ieee80211_has_protected(hdr->frame_control))
                return;

        /* Offloaded frames are already decrypted but firmware insists they are
         * protected in the 802.11 header. Strip the flag. Otherwise mac80211
         * will drop the frame.
         */
        hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
        status->flag |= RX_FLAG_DECRYPTED |
                        RX_FLAG_IV_STRIPPED |
                        RX_FLAG_MMIC_STRIPPED;
}

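/* Deliver firmware-offloaded MSDUs. These carry a short htt_rx_offload_msdu
 * header instead of the normal rx descriptor, so the payload is re-aligned
 * and a minimal rx status is built before handing each frame to mac80211.
 */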
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
                                       struct sk_buff_head *list)
{
        struct ath10k_htt *htt = &ar->htt;
        struct ieee80211_rx_status *status = &htt->rx_status;
        struct htt_rx_offload_msdu *rx;
        struct sk_buff *msdu;
        size_t offset;

        while ((msdu = __skb_dequeue(list))) {
                /* Offloaded frames don't have an rx descriptor. Instead they
                 * have a short meta information header.
                 */
                rx = (void *)msdu->data;

                skb_put(msdu, sizeof(*rx));
                skb_pull(msdu, sizeof(*rx));

                if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
                        ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
                        dev_kfree_skb_any(msdu);
                        continue;
                }

                skb_put(msdu, __le16_to_cpu(rx->msdu_len));

                /* Offloaded rx header length isn't a multiple of 2 or 4 so
                 * the actual payload is unaligned. Align the frame. Otherwise
                 * mac80211 complains. This shouldn't reduce performance much
                 * because these offloaded frames are rare.
                 */
                offset = 4 - ((unsigned long)msdu->data & 3);
                skb_put(msdu, offset);
                memmove(msdu->data + offset, msdu->data, msdu->len);
                skb_pull(msdu, offset);

                /* FIXME: The frame is NWifi. Re-construct QoS Control
                 * if possible later.
                 */

                memset(status, 0, sizeof(*status));
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;

                ath10k_htt_rx_h_rx_offload_prot(status, msdu);
                ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
                ath10k_process_rx(ar, status, msdu);
        }
}

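/* Process an HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND message: pop the listed
 * buffers by physical address, handle offloaded frames separately and feed
 * each extracted A-MSDU through the usual rx path.
 */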
static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (void *)skb->data;
        struct ieee80211_rx_status *status = &htt->rx_status;
        struct sk_buff_head list;
        struct sk_buff_head amsdu;
        u16 peer_id;
        u16 msdu_count;
        u8 vdev_id;
        u8 tid;
        bool offload;
        bool frag;
        int ret;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_confused)
                return;

        skb_pull(skb, sizeof(resp->hdr));
        skb_pull(skb, sizeof(resp->rx_in_ord_ind));

        peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
        msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
        vdev_id = resp->rx_in_ord_ind.vdev_id;
        tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
        offload = !!(resp->rx_in_ord_ind.info &
                     HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
        frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
                   vdev_id, peer_id, tid, offload, frag, msdu_count);

        if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
                ath10k_warn(ar, "dropping invalid in order rx indication\n");
                return;
        }

        /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
         * extracted and processed.
         */
        __skb_queue_head_init(&list);
        ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
        if (ret < 0) {
                ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
                htt->rx_confused = true;
                return;
        }

        /* Offloaded frames are very different and need to be handled
         * separately.
         */
        if (offload)
                ath10k_htt_rx_h_rx_offload(ar, &list);

        while (!skb_queue_empty(&list)) {
                __skb_queue_head_init(&amsdu);
                ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
                switch (ret) {
                case 0:
                        /* Note: The in-order indication may report interleaved
                         * frames from different PPDUs meaning reported rx rate
                         * to mac80211 isn't accurate/reliable. It's still
                         * better to report something than nothing though. This
                         * should still give an idea about rx rate to the user.
                         */
                        ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
                        ath10k_htt_rx_h_filter(ar, &amsdu, status);
                        ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
                        ath10k_htt_rx_h_deliver(ar, &amsdu, status);
                        break;
                case -EAGAIN:
                        /* fall through */
                default:
                        /* Should not happen. */
                        ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
                        htt->rx_confused = true;
                        __skb_queue_purge(&list);
                        return;
                }
        }

        tasklet_schedule(&htt->rx_replenish_task);
}

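/* Top-level HTT T2H message dispatcher. Rx and tx completion indications are
 * queued for the txrx completion tasklet (the skb is consumed there); all
 * other message types are handled inline and the skb is freed before
 * returning.
 */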
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;
        enum htt_t2h_msg_type type;

        /* confirm alignment */
        if (!IS_ALIGNED((unsigned long)skb->data, 4))
                ath10k_warn(ar, "unaligned htt message, expect trouble\n");

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
                   resp->hdr.msg_type);

        if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
                ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
                           resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
                dev_kfree_skb_any(skb);
                return;
        }
        type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

        switch (type) {
        case HTT_T2H_MSG_TYPE_VERSION_CONF: {
                htt->target_version_major = resp->ver_resp.major;
                htt->target_version_minor = resp->ver_resp.minor;
                complete(&htt->target_version_received);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_IND:
                spin_lock_bh(&htt->rx_ring.lock);
                __skb_queue_tail(&htt->rx_compl_q, skb);
                spin_unlock_bh(&htt->rx_ring.lock);
                tasklet_schedule(&htt->txrx_compl_task);
                return;
        case HTT_T2H_MSG_TYPE_PEER_MAP: {
                struct htt_peer_map_event ev = {
                        .vdev_id = resp->peer_map.vdev_id,
                        .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
                };
                memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
                ath10k_peer_map_event(htt, &ev);
                break;
        }
        case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
                struct htt_peer_unmap_event ev = {
                        .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
                };
                ath10k_peer_unmap_event(htt, &ev);
                break;
        }
        case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
                struct htt_tx_done tx_done = {};
                int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

                tx_done.msdu_id =
                        __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

                switch (status) {
                case HTT_MGMT_TX_STATUS_OK:
                        tx_done.success = true;
                        break;
                case HTT_MGMT_TX_STATUS_RETRY:
                        tx_done.no_ack = true;
                        break;
                case HTT_MGMT_TX_STATUS_DROP:
                        tx_done.discard = true;
                        break;
                }

                ath10k_txrx_tx_unref(htt, &tx_done);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
                skb_queue_tail(&htt->tx_compl_q, skb);
                tasklet_schedule(&htt->txrx_compl_task);
                return;
        case HTT_T2H_MSG_TYPE_SEC_IND: {
                struct ath10k *ar = htt->ar;
                struct htt_security_indication *ev = &resp->security_indication;

                ath10k_dbg(ar, ATH10K_DBG_HTT,
                           "sec ind peer_id %d unicast %d type %d\n",
                           __le16_to_cpu(ev->peer_id),
                           !!(ev->flags & HTT_SECURITY_IS_UNICAST),
                           MS(ev->flags, HTT_SECURITY_TYPE));
                complete(&ar->install_key_done);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
                break;
        }
        case HTT_T2H_MSG_TYPE_TEST:
                break;
        case HTT_T2H_MSG_TYPE_STATS_CONF:
                trace_ath10k_htt_stats(ar, skb->data, skb->len);
                break;
        case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
                /* Firmware can return tx frames if it's unable to fully
                 * process them and suspects host may be able to fix it. ath10k
                 * sends all tx frames as already inspected so this shouldn't
                 * happen unless fw has a bug.
                 */
                ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
                break;
        case HTT_T2H_MSG_TYPE_RX_ADDBA:
                ath10k_htt_rx_addba(ar, resp);
                break;
        case HTT_T2H_MSG_TYPE_RX_DELBA:
                ath10k_htt_rx_delba(ar, resp);
                break;
        case HTT_T2H_MSG_TYPE_PKTLOG: {
                struct ath10k_pktlog_hdr *hdr =
                        (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

                trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
                                        sizeof(*hdr) +
                                        __le16_to_cpu(hdr->size));
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_FLUSH: {
                /* Ignore this event because mac80211 takes care of Rx
                 * aggregation reordering.
                 */
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
                spin_lock_bh(&htt->rx_ring.lock);
                __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
                spin_unlock_bh(&htt->rx_ring.lock);
                tasklet_schedule(&htt->txrx_compl_task);
                return;
        }
        case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
                break;
        case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
                break;
        case HTT_T2H_MSG_TYPE_AGGR_CONF:
                break;
        case HTT_T2H_MSG_TYPE_EN_STATS:
        case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
        case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
        case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
        default:
                ath10k_warn(ar, "htt event (%d) not handled\n",
                            resp->hdr.msg_type);
                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                break;
        }

        /* Free the indication buffer */
        dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

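/* Tasklet that drains the tx completion, rx indication and in-order rx
 * queues filled by ath10k_htt_t2h_msg_handler().
 */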
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
        struct ath10k *ar = htt->ar;
        struct htt_resp *resp;
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&htt->tx_compl_q))) {
                ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
                dev_kfree_skb_any(skb);
        }

        spin_lock_bh(&htt->rx_ring.lock);
        while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
                resp = (struct htt_resp *)skb->data;
                ath10k_htt_rx_handler(htt, &resp->rx_ind);
                dev_kfree_skb_any(skb);
        }

        while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
                ath10k_htt_rx_in_ord_ind(ar, skb);
                dev_kfree_skb_any(skb);
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}