/*
 * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "wil6210.h"
#include "txrx.h"

#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff

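/* 802.11 MPDU sequence numbers are 12 bits wide, so all sequence
 * arithmetic below is done modulo SEQ_MODULO (0x1000).
 * seq_less(a, b) is a wraparound-aware "a precedes b": it is true when
 * a is behind b by less than half of the sequence space.
 */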
static inline int seq_less(u16 sq1, u16 sq2)
{
        return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static inline u16 seq_inc(u16 sq)
{
        return (sq + 1) & SEQ_MASK;
}

static inline u16 seq_sub(u16 sq1, u16 sq2)
{
        return (sq1 - sq2) & SEQ_MASK;
}

static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
{
        return seq_sub(seq, r->ssn) % r->buf_size;
}

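/* Hand the frame stored at @index (if any) up the stack and advance
 * head_seq_num by one. The head is advanced even for an empty slot so
 * that callers can walk the window sequence by sequence.
 */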
static void wil_release_reorder_frame(struct wil6210_priv *wil,
                                      struct wil_tid_ampdu_rx *r,
                                      int index)
{
        struct net_device *ndev = wil_to_ndev(wil);
        struct sk_buff *skb = r->reorder_buf[index];

        if (!skb)
                goto no_frame;

        /* release the frame from the reorder ring buffer */
        r->stored_mpdu_num--;
        r->reorder_buf[index] = NULL;
        wil_netif_rx_any(skb, ndev);

no_frame:
        r->head_seq_num = seq_inc(r->head_seq_num);
}

static void wil_release_reorder_frames(struct wil6210_priv *wil,
                                       struct wil_tid_ampdu_rx *r,
                                       u16 hseq)
{
        int index;

        /* note: this function is never called with hseq preceding
         * r->head_seq_num, i.e. !seq_less(hseq, r->head_seq_num) always
         * holds, and thus on loop exit we have
         * r->head_seq_num == hseq
         */
        while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
                index = reorder_index(r, r->head_seq_num);
                wil_release_reorder_frame(wil, r, index);
        }
        r->head_seq_num = hseq;
}

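/* Release all consecutive frames stored at the head of the window,
 * stopping at the first empty slot, i.e. the first still-missing
 * sequence number.
 */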
static void wil_reorder_release(struct wil6210_priv *wil,
                                struct wil_tid_ampdu_rx *r)
{
        int index = reorder_index(r, r->head_seq_num);

        while (r->reorder_buf[index]) {
                wil_release_reorder_frame(wil, r, index);
                index = reorder_index(r, r->head_seq_num);
        }
}

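/* Reorder entry point for a received data MPDU. Depending on where the
 * sequence number falls relative to the reorder window, the frame is
 * dropped (old or duplicate), delivered immediately (in order with
 * nothing buffered), or stored in the reorder buffer until the missing
 * frames arrive.
 */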
/* called in NAPI context */
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
        struct net_device *ndev = wil_to_ndev(wil);
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);
        int tid = wil_rxdesc_tid(d);
        int cid = wil_rxdesc_cid(d);
        int mid = wil_rxdesc_mid(d);
        u16 seq = wil_rxdesc_seq(d);
        int mcast = wil_rxdesc_mcast(d);
        struct wil_sta_info *sta = &wil->sta[cid];
        struct wil_tid_ampdu_rx *r;
        u16 hseq;
        int index;

        wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
                     mid, cid, tid, seq, mcast);

        if (unlikely(mcast)) {
                wil_netif_rx_any(skb, ndev);
                return;
        }

        spin_lock(&sta->tid_rx_lock);

        r = sta->tid_rx[tid];
        if (!r) {
                wil_netif_rx_any(skb, ndev);
                goto out;
        }

        r->total++;
        hseq = r->head_seq_num;

        /* Due to the race between WMI events (where BACK establishment is
         * reported) and data Rx, a few packets may be passed up before the
         * reorder buffer gets allocated. Catch up by pretending the SSN is
         * what we see in the first Rx packet.
         *
         * Another scenario: Rx is delayed and we get a packet from before
         * the BACK. Pass it to the stack and wait.
         */
        if (r->first_time) {
                r->first_time = false;
                if (seq != r->head_seq_num) {
                        if (seq_less(seq, r->head_seq_num)) {
                                wil_err(wil,
                                        "Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
                                        seq, r->head_seq_num);
                                r->first_time = true;
                                wil_netif_rx_any(skb, ndev);
                                goto out;
                        }
                        wil_err(wil,
                                "Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
                                seq, r->head_seq_num);
                        r->head_seq_num = seq;
                        r->ssn = seq;
                }
        }

        /* frame with out of date sequence number */
        if (seq_less(seq, r->head_seq_num)) {
                r->ssn_last_drop = seq;
                r->drop_old++;
                wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
                             seq, r->head_seq_num);
                dev_kfree_skb(skb);
                goto out;
        }

        /*
         * If the frame's sequence number exceeds our buffering window,
         * release some previous frames to make room for this one.
         */
        if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
                hseq = seq_inc(seq_sub(seq, r->buf_size));
                /* release stored frames up to new head to stack */
                wil_release_reorder_frames(wil, r, hseq);
        }

        /* Now the new frame is always in the range of the reordering buffer */
        index = reorder_index(r, seq);

        /* check if we already stored this frame */
        if (r->reorder_buf[index]) {
                r->drop_dup++;
                wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
                dev_kfree_skb(skb);
                goto out;
        }

        /*
         * If the current MPDU is in the right order and nothing else
         * is stored we can process it directly, no need to buffer it.
         * If it is first but there's something stored, we may be able
         * to release frames after this one.
         */
        if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
                r->head_seq_num = seq_inc(r->head_seq_num);
                wil_netif_rx_any(skb, ndev);
                goto out;
        }

        /* put the frame in the reordering buffer */
        r->reorder_buf[index] = skb;
        r->reorder_time[index] = jiffies;
        r->stored_mpdu_num++;
        wil_reorder_release(wil, r);

out:
        spin_unlock(&sta->tid_rx_lock);
}

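/* A BAR carries the transmitter's new window start: everything stored
 * before that sequence number is flushed to the stack and head_seq_num
 * is moved forward to it.
 */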
/* process BAR frame, called in NAPI context */
void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq)
{
        struct wil_sta_info *sta = &wil->sta[cid];
        struct wil_tid_ampdu_rx *r;

        spin_lock(&sta->tid_rx_lock);

        r = sta->tid_rx[tid];
        if (!r) {
                wil_err(wil, "BAR for non-existing CID %d TID %d\n", cid, tid);
                goto out;
        }
        if (seq_less(seq, r->head_seq_num)) {
                wil_err(wil, "BAR Seq 0x%03x preceding head 0x%03x\n",
                        seq, r->head_seq_num);
                goto out;
        }
        wil_dbg_txrx(wil, "BAR: CID %d TID %d Seq 0x%03x head 0x%03x\n",
                     cid, tid, seq, r->head_seq_num);
        wil_release_reorder_frames(wil, r, seq);

out:
        spin_unlock(&sta->tid_rx_lock);
}

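/* Allocate a per-TID reorder context with @size slots, starting at @ssn.
 * The caller is expected to install it in sta->tid_rx[tid] under
 * tid_rx_lock (see wil_back_rx_handle()).
 */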
struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
                                                int size, u16 ssn)
{
        struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);

        if (!r)
                return NULL;

        r->reorder_buf =
                kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
        r->reorder_time =
                kcalloc(size, sizeof(unsigned long), GFP_KERNEL);
        if (!r->reorder_buf || !r->reorder_time) {
                kfree(r->reorder_buf);
                kfree(r->reorder_time);
                kfree(r);
                return NULL;
        }

        r->ssn = ssn;
        r->head_seq_num = ssn;
        r->buf_size = size;
        r->stored_mpdu_num = 0;
        r->first_time = true;
        return r;
}

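/* Flush anything still buffered up the stack (by releasing one full
 * window), then free the reorder context.
 */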
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
                           struct wil_tid_ampdu_rx *r)
{
        if (!r)
                return;
        wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size);
        kfree(r->reorder_buf);
        kfree(r->reorder_time);
        kfree(r);
}

/* ADDBA processing */
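/* Pick the aggregation window size to grant: cap the requested size by
 * WIL_MAX_AGG_WSIZE and by how many maximum-size MPDUs fit into one
 * maximum-size A-MPDU; a request of 0 means "no preference" and gets
 * the maximum.
 */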
static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
{
        u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
                                 (mtu_max + WIL_MAX_MPDU_OVERHEAD));

        if (!req_agg_wsize)
                return max_agg_size;

        return min(max_agg_size, req_agg_wsize);
}

/* Block Ack - Rx side (recipient) */
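/* An incoming ADDBA request is only queued here; it is handled in
 * wil_back_rx_worker(), because the response is sent via a WMI command,
 * which may sleep.
 */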
int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
                         u8 dialog_token, __le16 ba_param_set,
                         __le16 ba_timeout, __le16 ba_seq_ctrl)
{
        struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);

        if (!req)
                return -ENOMEM;

        req->cidxtid = cidxtid;
        req->dialog_token = dialog_token;
        req->ba_param_set = le16_to_cpu(ba_param_set);
        req->ba_timeout = le16_to_cpu(ba_timeout);
        req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);

        mutex_lock(&wil->back_rx_mutex);
        list_add_tail(&req->list, &wil->back_rx_pending);
        mutex_unlock(&wil->back_rx_mutex);

        queue_work(wil->wq_service, &wil->back_rx_worker);

        return 0;
}

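/* Runs from wil_back_rx_worker(): validate the ADDBA request, send the
 * ADDBA response over WMI and, on success, (re)install the reorder
 * context for the TID. In ba_seq_ctrl, bits 0..3 are the fragment
 * number and bits 4..15 the starting sequence number, hence the ">> 4"
 * below.
 */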
static void wil_back_rx_handle(struct wil6210_priv *wil,
                               struct wil_back_rx *req)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
        struct wil_sta_info *sta;
        u8 cid, tid;
        u16 agg_wsize = 0;
        /* bit 0: A-MSDU supported
         * bit 1: policy (should be 0 for us)
         * bits 2..5: TID
         * bits 6..15: buffer size
         */
        u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
        bool agg_amsdu = !!(req->ba_param_set & BIT(0));
        int ba_policy = req->ba_param_set & BIT(1);
        u16 agg_timeout = req->ba_timeout;
        u16 status = WLAN_STATUS_SUCCESS;
        u16 ssn = req->ba_seq_ctrl >> 4;
        struct wil_tid_ampdu_rx *r;
        int rc;

        might_sleep();
        parse_cidxtid(req->cidxtid, &cid, &tid);

        /* sanity checks */
        if (cid >= WIL6210_MAX_CID) {
                wil_err(wil, "BACK: invalid CID %d\n", cid);
                return;
        }

        sta = &wil->sta[cid];
        if (sta->status != wil_sta_connected) {
                wil_err(wil, "BACK: CID %d not connected\n", cid);
                return;
        }

        wil_dbg_wmi(wil,
                    "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
                    cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
                    agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);

        /* apply policies */
        if (ba_policy) {
                wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
                status = WLAN_STATUS_INVALID_QOS_PARAM;
        }
        if (status == WLAN_STATUS_SUCCESS)
                agg_wsize = wil_agg_size(wil, req_agg_wsize);

        rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
                               agg_amsdu, agg_wsize, agg_timeout);
        if (rc || (status != WLAN_STATUS_SUCCESS))
                return;

        /* apply */
        r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
        spin_lock_bh(&sta->tid_rx_lock);
        wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
        sta->tid_rx[tid] = r;
        spin_unlock_bh(&sta->tid_rx_lock);
}

void wil_back_rx_flush(struct wil6210_priv *wil)
{
        struct wil_back_rx *evt, *t;

        wil_dbg_misc(wil, "%s()\n", __func__);

        mutex_lock(&wil->back_rx_mutex);

        list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
                list_del(&evt->list);
                kfree(evt);
        }

        mutex_unlock(&wil->back_rx_mutex);
}

/* Retrieve next ADDBA request from the pending list */
static struct list_head *next_back_rx(struct wil6210_priv *wil)
{
        struct list_head *ret = NULL;

        mutex_lock(&wil->back_rx_mutex);

        if (!list_empty(&wil->back_rx_pending)) {
                ret = wil->back_rx_pending.next;
                list_del(ret);
        }

        mutex_unlock(&wil->back_rx_mutex);

        return ret;
}

void wil_back_rx_worker(struct work_struct *work)
{
        struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
                                                back_rx_worker);
        struct wil_back_rx *evt;
        struct list_head *lh;

        while ((lh = next_back_rx(wil)) != NULL) {
                evt = list_entry(lh, struct wil_back_rx, list);

                wil_back_rx_handle(wil, evt);
                kfree(evt);
        }
}

/* BACK - Tx (originator) side */
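/* Originator side of BACK establishment for a Tx vring: skip if an
 * ADDBA is already in flight or a session is already established,
 * otherwise mark it in progress and send the WMI ADDBA request; on
 * immediate failure the in-progress flag is cleared again.
 */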
static void wil_back_tx_handle(struct wil6210_priv *wil,
                               struct wil_back_tx *req)
{
        struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
        int rc;

        if (txdata->addba_in_progress) {
                wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
                             req->ringid);
                return;
        }
        if (txdata->agg_wsize) {
                wil_dbg_misc(wil,
                             "ADDBA for vring[%d] already established wsize %d\n",
                             req->ringid, txdata->agg_wsize);
                return;
        }
        txdata->addba_in_progress = true;
        rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
        if (rc)
                txdata->addba_in_progress = false;
}

static struct list_head *next_back_tx(struct wil6210_priv *wil)
{
        struct list_head *ret = NULL;

        mutex_lock(&wil->back_tx_mutex);

        if (!list_empty(&wil->back_tx_pending)) {
                ret = wil->back_tx_pending.next;
                list_del(ret);
        }

        mutex_unlock(&wil->back_tx_mutex);

        return ret;
}

void wil_back_tx_worker(struct work_struct *work)
{
        struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
                                                back_tx_worker);
        struct wil_back_tx *evt;
        struct list_head *lh;

        while ((lh = next_back_tx(wil)) != NULL) {
                evt = list_entry(lh, struct wil_back_tx, list);

                wil_back_tx_handle(wil, evt);
                kfree(evt);
        }
}

void wil_back_tx_flush(struct wil6210_priv *wil)
{
        struct wil_back_tx *evt, *t;

        wil_dbg_misc(wil, "%s()\n", __func__);

        mutex_lock(&wil->back_tx_mutex);

        list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
                list_del(&evt->list);
                kfree(evt);
        }

        mutex_unlock(&wil->back_tx_mutex);
}

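/* Queue a request to establish BACK on a Tx vring with (at most) the
 * given window size; the WMI exchange itself is done from
 * wil_back_tx_worker(), which may sleep.
 */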
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
        struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);

        if (!req)
                return -ENOMEM;

        req->ringid = ringid;
        req->agg_wsize = wil_agg_size(wil, wsize);
        req->agg_timeout = 0;

        mutex_lock(&wil->back_tx_mutex);
        list_add_tail(&req->list, &wil->back_tx_pending);
        mutex_unlock(&wil->back_tx_mutex);

        queue_work(wil->wq_service, &wil->back_tx_worker);

        return 0;
}