  1. /*
  2. * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
  3. * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2
  7. * as published by the Free Software Foundation
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include "mt7601u.h"
  15. #include "trace.h"
/* TX queue identifiers.
 *
 * The first four entries are assigned the mac80211 IEEE80211_AC_* values,
 * so a mac80211 access-category index can be used directly as an
 * MT_TXQ_* value.  MT_TXQ_PSD and MT_TXQ_MCU follow after the four ACs.
 */
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,	/* voice */
	MT_TXQ_VI = IEEE80211_AC_VI,	/* video */
	MT_TXQ_BE = IEEE80211_AC_BE,	/* best effort */
	MT_TXQ_BK = IEEE80211_AC_BK,	/* background */
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	__MT_TXQ_MAX			/* number of queues */
};
  25. /* Hardware uses mirrored order of queues with Q0 having the highest priority */
  26. static u8 q2hwq(u8 q)
  27. {
  28. return q ^ 0x3;
  29. }
  30. /* Take mac80211 Q id from the skb and translate it to hardware Q id */
  31. static u8 skb2q(struct sk_buff *skb)
  32. {
  33. int qid = skb_get_queue_mapping(skb);
  34. if (WARN_ON(qid >= MT_TXQ_PSD)) {
  35. qid = MT_TXQ_BE;
  36. skb_set_queue_mapping(skb, qid);
  37. }
  38. return q2hwq(qid);
  39. }
  40. /* Note: TX retry reporting is a bit broken.
  41. * Retries are reported only once per AMPDU and often come a frame early
  42. * i.e. they are reported in the last status preceding the AMPDU. Apart
  43. * from the fact that it's hard to know the length of the AMPDU (which is
  44. * required to know to how many consecutive frames retries should be
  45. * applied), if status comes early on full FIFO it gets lost and retries
  46. * of the whole AMPDU become invisible.
  47. * As a work-around encode the desired rate in PKT_ID of TX descriptor
  48. * and based on that guess the retries (every rate is tried once).
  49. * Only downside here is that for MCS0 we have to rely solely on
  50. * transmission failures as no retries can ever be reported.
  51. * Not having to read EXT_FIFO has a nice effect of doubling the number
  52. * of reports which can be fetched.
  53. * Also the vendor driver never uses the EXT_FIFO register so it may be
  54. * undertested.
  55. */
  56. static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe)
  57. {
  58. u8 encoded = (rate + 1) + is_probe * 8;
  59. /* Because PKT_ID 0 disables status reporting only 15 values are
  60. * available but 16 are needed (8 MCS * 2 for encoding is_probe)
  61. * - we need to cram together two rates. MCS0 and MCS7 with is_probe
  62. * share PKT_ID 9.
  63. */
  64. if (is_probe && rate == 7)
  65. return encoded - 7;
  66. return encoded;
  67. }
  68. static void
  69. mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
  70. {
  71. u8 req_rate = stat->pktid;
  72. u8 eff_rate = stat->rate & 0x7;
  73. req_rate -= 1;
  74. if (req_rate > 7) {
  75. stat->is_probe = true;
  76. req_rate -= 8;
  77. /* Decide between MCS0 and MCS7 which share pktid 9 */
  78. if (!req_rate && eff_rate)
  79. req_rate = 7;
  80. }
  81. stat->retry = req_rate - eff_rate;
  82. }
  83. static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
  84. struct ieee80211_tx_info *info)
  85. {
  86. int pkt_len = (unsigned long)info->status.status_driver_data[0];
  87. skb_pull(skb, sizeof(struct mt76_txwi) + 4);
  88. if (ieee80211_get_hdrlen_from_skb(skb) % 4)
  89. mt76_remove_hdr_pad(skb);
  90. skb_trim(skb, pkt_len);
  91. }
/* Report a completed TX frame back to mac80211.
 *
 * Strips the driver/DMA overhead added on the TX path, then hands the skb
 * to mac80211.  The status is synthesized: rates[0].idx is cleared and
 * IEEE80211_TX_STAT_ACK is always set here (per-rate/retry accounting is
 * delivered separately through the TX status FIFO path).
 */
void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	mt7601u_tx_skb_remove_dma_overhead(skb, info);

	ieee80211_tx_info_clear_status(info);
	info->status.rates[0].idx = -1;
	info->flags |= IEEE80211_TX_STAT_ACK;

	/* mac_lock is held across the call into ieee80211_tx_status(). */
	spin_lock(&dev->mac_lock);
	ieee80211_tx_status(dev->hw, skb);
	spin_unlock(&dev->mac_lock);
}
  103. static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
  104. {
  105. int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
  106. u32 need_head;
  107. need_head = sizeof(struct mt76_txwi) + 4;
  108. if (hdr_len % 4)
  109. need_head += 2;
  110. return skb_cow(skb, need_head);
  111. }
/* Build the TXWI (TX descriptor) and prepend it to @skb.
 *
 * Picks the TX rate (sticky per-WCID rate unless mac80211 rate control
 * supplied one), fills in ACK/sequence control and A-MPDU parameters,
 * and encodes the requested rate into the PKT_ID field of len_ctl for
 * the retry-guessing work-around described above
 * mt7601u_tx_pktid_enc().
 *
 * @pkt_len is the frame length before any driver overhead was added.
 * Returns a pointer to the TXWI inside the skb head.
 */
static struct mt76_txwi *
mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
		  struct ieee80211_sta *sta, struct mt76_wcid *wcid,
		  int pkt_len)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct mt76_txwi *txwi;
	unsigned long flags;
	bool is_probe;
	u32 pkt_id;
	u16 rate_ctl;
	u8 nss;

	txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
	memset(txwi, 0, sizeof(*txwi));

	/* No sticky rate configured for this WCID - let mac80211 rate
	 * control fill info->control.rates.
	 */
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	/* dev->lock is held while reading wcid->tx_rate / converting the
	 * rate.  nss is filled by mt76_mac_tx_rate_val() but unused here.
	 */
	spin_lock_irqsave(&dev->lock, flags);
	if (rate->idx < 0 || !rate->count)
		rate_ctl = wcid->tx_rate;
	else
		rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss);
	spin_unlock_irqrestore(&dev->lock, flags);
	txwi->rate_ctl = cpu_to_le16(rate_ctl);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;

	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		/* Scale the BA window by the peer's A-MPDU factor,
		 * capped at 63 frames.
		 */
		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size);
		/* Rate-control probe frames are sent unaggregated:
		 * BA window zeroed and AMPDU flags cleared below.
		 */
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
					  MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY,
						   sta->ht_cap.ampdu_density));
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			txwi->flags = 0;
	}

	txwi->wcid = wcid->idx;

	/* Encode the requested MCS (and probe bit) into PKT_ID so the
	 * status path can reconstruct retries.
	 */
	is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
	pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id);
	txwi->len_ctl = cpu_to_le16(pkt_len);

	return txwi;
}
/* mac80211 .tx entry point - push one frame towards the device.
 *
 * Saves the original frame length (restored by the status path), makes
 * head-room for the TXWI/DMA overhead, selects the WCID (monitor
 * default, per-STA, or per-vif group), prepends the TXWI and hands the
 * frame to the DMA layer.  If head-room cannot be made the frame is
 * dropped via ieee80211_free_txskb().
 */
void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt7601u_dev *dev = hw->priv;
	struct ieee80211_vif *vif = info->control.vif;
	struct ieee80211_sta *sta = control->sta;
	struct mt76_sta *msta = NULL;
	struct mt76_wcid *wcid = dev->mon_wcid;
	struct mt76_txwi *txwi;
	int pkt_len = skb->len;
	int hw_q = skb2q(skb);

	BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
	/* Stash the pre-overhead length so the status path can trim the
	 * skb back to its original size - must happen before any padding
	 * is inserted below.
	 */
	info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;

	if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
		ieee80211_free_txskb(dev->hw, skb);
		return;
	}

	if (sta) {
		msta = (struct mt76_sta *) sta->drv_priv;
		wcid = &msta->wcid;
	} else if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
		wcid = &mvif->group_wcid;
	}

	txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);

	/* NOTE(review): no free on enqueue failure here - presumably
	 * mt7601u_dma_enqueue_tx() consumes the skb on error; confirm
	 * against its implementation.
	 */
	if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
		return;

	trace_mt_tx(dev, skb, msta, txwi);
}
/* Delayed-work handler that drains the TX status FIFO.
 *
 * Fetches and reports statuses until the FIFO runs dry or the device is
 * flagged as removed, then decides how to re-arm itself: quickly (10 ms)
 * if it did any work, slower (20 ms) if MORE_STATS was signalled while it
 * ran, otherwise it clears READING_STATS and stops.
 */
void mt7601u_tx_stat(struct work_struct *work)
{
	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
					       stat_work.work);
	struct mt76_tx_status stat;
	unsigned long flags;
	int cleaned = 0;

	while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
		stat = mt7601u_mac_fetch_tx_status(dev);
		if (!stat.valid)
			break;

		/* Recover requested rate/retries from the PKT_ID encoding
		 * before reporting upward.
		 */
		mt7601u_tx_pktid_dec(dev, &stat);
		mt76_send_tx_status(dev, &stat);

		cleaned++;
	}
	trace_mt_tx_status_cleaned(dev, cleaned);

	/* tx_lock keeps the re-arm decision consistent with code that
	 * sets the MORE_STATS/READING_STATS bits.
	 */
	spin_lock_irqsave(&dev->tx_lock, flags);
	if (cleaned)
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));
	else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(20));
	else
		clear_bit(MT7601U_STATE_READING_STATS, &dev->state);
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
/* mac80211 .conf_tx callback - program per-AC EDCA parameters.
 *
 * Writes AIFSN/CWmin/CWmax/TXOP both to the per-queue EDCA_CFG register
 * and into the shared per-field WMM_* registers (one slot per hardware
 * queue, selected via the *_SHIFT macros).  Always returns 0.
 */
int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    u16 queue, const struct ieee80211_tx_queue_params *params)
{
	struct mt7601u_dev *dev = hw->priv;
	u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
	u32 val;

	/* TODO: should we do funny things with the parameters?
	 *	 See what mt7601u_set_default_edca() used to do in init.c.
	 */

	/* CW values are programmed as exponents (fls of the window size);
	 * defaults of 5/10 are used when mac80211 passes zero.
	 */
	if (params->cw_min)
		cw_min = fls(params->cw_min);
	if (params->cw_max)
		cw_max = fls(params->cw_max);

	/* Register field widths: 8-bit TXOP, 4-bit AIFS/CWmin/CWmax. */
	WARN_ON(params->txop > 0xff);
	WARN_ON(params->aifs > 0xf);
	WARN_ON(cw_min > 0xf);
	WARN_ON(cw_max > 0xf);

	val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) |
	      MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) |
	      MT76_SET(MT_EDCA_CFG_CWMAX, cw_max);
	/* TODO: based on user-controlled EnableTxBurst var vendor drv sets
	 *	 a really long txop on AC0 (see connect.c:2009) but only on
	 *	 connect? When not connected should be 0.
	 */
	if (!hw_q)
		val |= 0x60;
	else
		val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop);
	mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);

	/* Mirror the same parameters into the aggregate WMM registers;
	 * read-modify-write so only this queue's field is touched.
	 */
	val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
	val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_TXOP(hw_q), val);

	val = mt76_rr(dev, MT_WMM_AIFSN);
	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
	val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_AIFSN, val);

	val = mt76_rr(dev, MT_WMM_CWMIN);
	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
	val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_CWMIN, val);

	val = mt76_rr(dev, MT_WMM_CWMAX);
	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
	val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_CWMAX, val);

	return 0;
}