  1. /* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
  2. * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
  3. * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
  4. * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
  5. * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
  6. * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
  7. * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
  8. * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
  9. * <http://rt2x00.serialmonkey.com>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2 of the License, or
  14. * (at your option) any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  23. */
  24. /* Module: rt2800mmio
  25. * Abstract: rt2800 MMIO device routines.
  26. */
  27. #include <linux/kernel.h>
  28. #include <linux/module.h>
  29. #include <linux/export.h>
  30. #include "rt2x00.h"
  31. #include "rt2x00mmio.h"
  32. #include "rt2800.h"
  33. #include "rt2800lib.h"
  34. #include "rt2800mmio.h"
  35. /*
  36. * TX descriptor initialization
  37. */
  38. __le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
  39. {
  40. return (__le32 *) entry->skb->data;
  41. }
  42. EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
/*
 * rt2800mmio_write_tx_desc - fill the MMIO TX descriptor for a frame.
 * @entry: queue entry whose DMA-mapped buffer (TXWI + 802.11 frame) is
 *         described by the descriptor.
 * @txdesc: software tx descriptor carrying the per-frame flags.
 *
 * Writes all four 32-bit TXD words for @entry and records the descriptor
 * location/length in the skb frame descriptor.
 */
void rt2800mmio_write_tx_desc(struct queue_entry *entry,
			      struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *txd = entry_priv->desc;
	u32 word;
	const unsigned int txwi_size = entry->queue->winfo_size;

	/*
	 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
	 * must contains a TXWI structure + 802.11 header + padding + 802.11
	 * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and
	 * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11
	 * data. It means that LAST_SEC0 is always 0.
	 */

	/*
	 * Initialize TX descriptor
	 */
	/* Word 0: DMA address of the TXWI segment. */
	word = 0;
	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
	rt2x00_desc_write(txd, 0, word);

	/* Word 1: segment lengths and completion flags; DMA_DONE is
	 * cleared so hardware knows the descriptor is pending. */
	word = 0;
	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_BURST,
			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
	rt2x00_desc_write(txd, 1, word);

	/* Word 2: DMA address of the frame data, directly after the TXWI. */
	word = 0;
	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
			   skbdesc->skb_dma + txwi_size);
	rt2x00_desc_write(txd, 2, word);

	/* Word 3: WIV is set when no IV was inserted by software;
	 * QSEL 2 selects the EDCA queue (NOTE(review): value taken as-is
	 * from the vendor descriptor layout — confirm against datasheet). */
	word = 0;
	rt2x00_set_field32(&word, TXD_W3_WIV,
			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
	rt2x00_desc_write(txd, 3, word);

	/*
	 * Register descriptor details in skb frame descriptor.
	 */
	skbdesc->desc = txd;
	skbdesc->desc_len = TXD_DESC_SIZE;
}
EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
  90. /*
  91. * RX control handlers
  92. */
/*
 * rt2800mmio_fill_rxdone - translate the RX descriptor into rxdone info.
 * @entry: received queue entry.
 * @rxdesc: rxdone descriptor to fill for rt2x00lib/mac80211.
 *
 * Decodes RXD word 3 (CRC/cipher/decryption status, BSS match, L2 pad)
 * and then hands the RXWI at the start of the buffer to rt2800lib.
 */
void rt2800mmio_fill_rxdone(struct queue_entry *entry,
			    struct rxdone_entry_desc *rxdesc)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *rxd = entry_priv->desc;
	u32 word;

	rt2x00_desc_read(rxd, 3, &word);

	if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

	/*
	 * Unfortunately we don't know the cipher type used during
	 * decryption. This prevents us from correct providing
	 * correct statistics through debugfs.
	 */
	rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);

	if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
		/*
		 * Hardware has stripped IV/EIV data from 802.11 frame during
		 * decryption. Unfortunately the descriptor doesn't contain
		 * any fields with the EIV/IV data either, so they can't
		 * be restored by rt2x00lib.
		 */
		rxdesc->flags |= RX_FLAG_IV_STRIPPED;

		/*
		 * The hardware has already checked the Michael Mic and has
		 * stripped it from the frame. Signal this to mac80211.
		 */
		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
	}

	if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	if (rt2x00_get_field32(word, RXD_W3_L2PAD))
		rxdesc->dev_flags |= RXDONE_L2PAD;

	/*
	 * Process the RXWI structure that is at the start of the buffer.
	 */
	rt2800_process_rxwi(entry, rxdesc);
}
EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
  136. /*
  137. * Interrupt functions.
  138. */
/*
 * Wake the device from autowake power-save by re-running the PS
 * configuration with an all-cleared ieee80211_conf (flags == 0, i.e.
 * power-save disabled).
 */
static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
{
	struct ieee80211_conf conf = { .flags = 0 };
	struct rt2x00lib_conf libconf = { .conf = &conf };

	rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
  145. static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
  146. {
  147. __le32 *txwi;
  148. u32 word;
  149. int wcid, tx_wcid;
  150. wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
  151. txwi = rt2800_drv_get_txwi(entry);
  152. rt2x00_desc_read(txwi, 1, &word);
  153. tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
  154. return (tx_wcid == wcid);
  155. }
/*
 * Queue-iteration callback: attach the tx status word (*data) to the
 * first queued frame whose TXWI WCID matches the status report and that
 * does not yet carry a status. Returns true to stop the iteration.
 */
static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
{
	u32 status = *(u32 *)data;

	/*
	 * rt2800pci hardware might reorder frames when exchanging traffic
	 * with multiple BA enabled STAs.
	 *
	 * For example, a tx queue
	 * [ STA1 | STA2 | STA1 | STA2 ]
	 * can result in tx status reports
	 * [ STA1 | STA1 | STA2 | STA2 ]
	 * when the hw decides to aggregate the frames for STA1 into one AMPDU.
	 *
	 * To mitigate this effect, associate the tx status to the first frame
	 * in the tx queue with a matching wcid.
	 */
	if (rt2800mmio_txdone_entry_check(entry, status) &&
	    !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
		/*
		 * Got a matching frame, associate the tx status with
		 * the frame
		 */
		entry->status = status;
		set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
		return true;
	}

	/* Check the next frame */
	return false;
}
  185. static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
  186. {
  187. u32 status = *(u32 *)data;
  188. /*
  189. * Find the first frame without tx status and assign this status to it
  190. * regardless if it matches or not.
  191. */
  192. if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
  193. /*
  194. * Got a matching frame, associate the tx status with
  195. * the frame
  196. */
  197. entry->status = status;
  198. set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
  199. return true;
  200. }
  201. /* Check the next frame */
  202. return false;
  203. }
  204. static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
  205. void *data)
  206. {
  207. if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
  208. rt2800_txdone_entry(entry, entry->status,
  209. rt2800mmio_get_txwi(entry));
  210. return false;
  211. }
  212. /* No more frames to release */
  213. return true;
  214. }
/*
 * Drain the tx status kfifo, match each status to a queued frame and
 * release completed frames. Processes at most 16 statuses per call;
 * returns true when the budget was exhausted (caller should reschedule).
 */
static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	u32 status;
	u8 qid;
	int max_tx_done = 16;	/* per-invocation budget to bound tasklet runtime */

	while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
		qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
		if (unlikely(qid >= QID_RX)) {
			/*
			 * Unknown queue, this shouldn't happen. Just drop
			 * this tx status.
			 */
			rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
				    qid);
			break;
		}

		queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
		if (unlikely(queue == NULL)) {
			/*
			 * The queue is NULL, this shouldn't happen. Stop
			 * processing here and drop the tx status
			 */
			rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
				    qid);
			break;
		}

		if (unlikely(rt2x00queue_empty(queue))) {
			/*
			 * The queue is empty. Stop processing here
			 * and drop the tx status.
			 */
			rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
				    qid);
			break;
		}

		/*
		 * Let's associate this tx status with the first
		 * matching frame.
		 */
		if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
						Q_INDEX, &status,
						rt2800mmio_txdone_find_entry)) {
			/*
			 * We cannot match the tx status to any frame, so just
			 * use the first one.
			 */
			if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
							Q_INDEX, &status,
							rt2800mmio_txdone_match_first)) {
				rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
					    qid);
				break;
			}
		}

		/*
		 * Release all frames with a valid tx status.
		 */
		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
					   Q_INDEX, NULL,
					   rt2800mmio_txdone_release_entries);

		if (--max_tx_done == 0)
			break;
	}

	/* Non-zero (true) only when the budget was fully consumed. */
	return !max_tx_done;
}
/*
 * Re-enable a single interrupt source in INT_MASK_CSR.
 * Called from tasklet (softirq) context, hence spin_lock_irq is
 * sufficient to serialize against the hard-IRQ handler which takes
 * the same lock.
 */
static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
					       struct rt2x00_field32 irq_field)
{
	u32 reg;

	/*
	 * Enable a single interrupt. The interrupt mask register
	 * access needs locking.
	 */
	spin_lock_irq(&rt2x00dev->irqmask_lock);
	rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
	rt2x00_set_field32(&reg, irq_field, 1);
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
/*
 * Tasklet: process queued tx status reports; reschedules itself when
 * rt2800mmio_txdone() ran out of budget before draining the fifo.
 */
void rt2800mmio_txstatus_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	if (rt2800mmio_txdone(rt2x00dev))
		tasklet_schedule(&rt2x00dev->txstatus_tasklet);

	/*
	 * No need to enable the tx status interrupt here as we always
	 * leave it enabled to minimize the possibility of a tx status
	 * register overflow. See comment in interrupt handler.
	 */
}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
/*
 * Tasklet: forward the pre-TBTT event to rt2x00lib and re-arm the
 * PRE_TBTT interrupt (which the IRQ handler masked) while the radio
 * is still enabled.
 */
void rt2800mmio_pretbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	rt2x00lib_pretbtt(rt2x00dev);

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
/*
 * Tasklet: handle a TBTT (beacon) event. In AP mode, periodically
 * compensates the hardware TBTT timer drift, then re-arms the TBTT
 * interrupt.
 */
void rt2800mmio_tbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u32 reg;

	rt2x00lib_beacondone(rt2x00dev);

	if (rt2x00dev->intf_ap_count) {
		/*
		 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
		 * causing beacon skew and as a result causing problems with
		 * some powersaving clients over time. Shorten the beacon
		 * interval every 64 beacons by 64us to mitigate this effect.
		 */
		/* One tick before wrap: shorten the interval by one unit
		 * (interval is programmed in units of 1/16 TU). */
		if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
			rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16) - 1);
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		} else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
			/* Restore the nominal beacon interval. */
			rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16));
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		}
		drv_data->tbtt_tick++;
		drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
	}

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
/*
 * Tasklet: process received frames. Reschedules itself while more RX
 * work remains; otherwise re-arms the RX_DONE interrupt if the radio
 * is still enabled.
 */
void rt2800mmio_rxdone_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	if (rt2x00mmio_rxdone(rt2x00dev))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
	else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
/*
 * Tasklet: wake the device from power-save and re-arm the AUTO_WAKEUP
 * interrupt while the radio is still enabled.
 */
void rt2800mmio_autowake_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	rt2800mmio_wakeup(rt2x00dev);

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev,
					    INT_MASK_CSR_AUTO_WAKEUP);
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
/*
 * Hard-IRQ helper: drain TX_STA_FIFO into the kernel kfifo immediately
 * (the hardware fifo is shallow and would overflow), then defer the
 * actual processing to the txstatus tasklet.
 */
static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
{
	u32 status;
	int i;

	/*
	 * The TX_FIFO_STATUS interrupt needs special care. We should
	 * read TX_STA_FIFO but we should do it immediately as otherwise
	 * the register can overflow and we would lose status reports.
	 *
	 * Hence, read the TX_STA_FIFO register and copy all tx status
	 * reports into a kernel FIFO which is handled in the txstatus
	 * tasklet. We use a tasklet to process the tx status reports
	 * because we can schedule the tasklet multiple times (when the
	 * interrupt fires again during tx status processing).
	 *
	 * Furthermore we don't disable the TX_FIFO_STATUS
	 * interrupt here but leave it enabled so that the TX_STA_FIFO
	 * can also be read while the tx status tasklet gets executed.
	 *
	 * Since we have only one producer and one consumer we don't
	 * need to lock the kfifo.
	 */
	/* Bound the loop by the tx queue depth to guarantee termination. */
	for (i = 0; i < rt2x00dev->tx->limit; i++) {
		rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);

		/* VALID bit cleared means the hardware fifo is empty. */
		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
			break;

		if (!kfifo_put(&rt2x00dev->txstatus_fifo, status)) {
			rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
			break;
		}
	}

	/* Schedule the tasklet for processing the tx status. */
	tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
/*
 * Top-level interrupt handler: acknowledge all pending sources, handle
 * tx status inline, schedule tasklets for everything else and mask the
 * sources those tasklets will re-enable when done.
 */
irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
	struct rt2x00_dev *rt2x00dev = dev_instance;
	u32 reg, mask;

	/* Read status and ACK all interrupts */
	rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
	rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);

	/* Not our interrupt (shared line). */
	if (!reg)
		return IRQ_NONE;

	/* Radio is down: the interrupt was acked above, nothing to do. */
	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return IRQ_HANDLED;

	/*
	 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
	 * for interrupts and interrupt masks we can just use the value of
	 * INT_SOURCE_CSR to create the interrupt mask.
	 */
	mask = ~reg;

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
		rt2800mmio_txstatus_interrupt(rt2x00dev);
		/*
		 * Never disable the TX_FIFO_STATUS interrupt.
		 */
		rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
	}

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
		tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
		tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
		tasklet_schedule(&rt2x00dev->autowake_tasklet);

	/*
	 * Disable all interrupts for which a tasklet was scheduled right now,
	 * the tasklet will reenable the appropriate interrupts.
	 */
	spin_lock(&rt2x00dev->irqmask_lock);
	rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
	reg &= mask;
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock(&rt2x00dev->irqmask_lock);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
/*
 * rt2800mmio_toggle_irq - enable or disable all device interrupts.
 * @rt2x00dev: device.
 * @state: STATE_RADIO_IRQ_ON or STATE_RADIO_IRQ_OFF.
 *
 * On enable, pending sources are acked first so we start from a clean
 * state; on disable, all tasklets are killed after masking so none can
 * run afterwards.
 */
void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
			   enum dev_state state)
{
	u32 reg;
	unsigned long flags;

	/*
	 * When interrupts are being enabled, the interrupt registers
	 * should clear the register to assure a clean state.
	 */
	if (state == STATE_RADIO_IRQ_ON) {
		rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
		rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
	}

	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
	/* Default mask is all-zero, i.e. everything disabled. */
	reg = 0;
	if (state == STATE_RADIO_IRQ_ON) {
		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
	}
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

	if (state == STATE_RADIO_IRQ_OFF) {
		/*
		 * Wait for possibly running tasklets to finish.
		 */
		tasklet_kill(&rt2x00dev->txstatus_tasklet);
		tasklet_kill(&rt2x00dev->rxdone_tasklet);
		tasklet_kill(&rt2x00dev->autowake_tasklet);
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
  478. /*
  479. * Queue handlers.
  480. */
/*
 * Start a queue: enable the RX engine for QID_RX, or the TSF/TBTT
 * timers and beacon generator for QID_BEACON. TX data queues need no
 * explicit start and fall through the default case.
 */
void rt2800mmio_start_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		/* Enable the pre-TBTT timer so pretbtt interrupts fire. */
		rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
/*
 * Kick a tx queue: write the current Q_INDEX into the hardware CTX_IDX
 * register so DMA picks up the newly queued frames. Management frames
 * use the fixed hardware queue 5.
 */
void rt2800mmio_kick_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry *entry;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
					  entry->entry_idx);
		break;
	case QID_MGMT:
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
					  entry->entry_idx);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
/*
 * Stop a queue: mirror of rt2800mmio_start_queue. For the beacon queue
 * the TBTT/pre-TBTT tasklets are additionally killed since their timer
 * sources have just been disabled.
 */
void rt2800mmio_stop_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);

		/*
		 * Wait for current invocation to finish. The tasklet
		 * won't be scheduled anymore afterwards since we disabled
		 * the TBTT and PRE TBTT timer.
		 */
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
  561. void rt2800mmio_queue_init(struct data_queue *queue)
  562. {
  563. struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
  564. unsigned short txwi_size, rxwi_size;
  565. rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
  566. switch (queue->qid) {
  567. case QID_RX:
  568. queue->limit = 128;
  569. queue->data_size = AGGREGATION_SIZE;
  570. queue->desc_size = RXD_DESC_SIZE;
  571. queue->winfo_size = rxwi_size;
  572. queue->priv_size = sizeof(struct queue_entry_priv_mmio);
  573. break;
  574. case QID_AC_VO:
  575. case QID_AC_VI:
  576. case QID_AC_BE:
  577. case QID_AC_BK:
  578. queue->limit = 64;
  579. queue->data_size = AGGREGATION_SIZE;
  580. queue->desc_size = TXD_DESC_SIZE;
  581. queue->winfo_size = txwi_size;
  582. queue->priv_size = sizeof(struct queue_entry_priv_mmio);
  583. break;
  584. case QID_BEACON:
  585. queue->limit = 8;
  586. queue->data_size = 0; /* No DMA required for beacons */
  587. queue->desc_size = TXD_DESC_SIZE;
  588. queue->winfo_size = txwi_size;
  589. queue->priv_size = sizeof(struct queue_entry_priv_mmio);
  590. break;
  591. case QID_ATIM:
  592. /* fallthrough */
  593. default:
  594. BUG();
  595. break;
  596. }
  597. }
  598. EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
  599. /*
  600. * Initialization functions.
  601. */
  602. bool rt2800mmio_get_entry_state(struct queue_entry *entry)
  603. {
  604. struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
  605. u32 word;
  606. if (entry->queue->qid == QID_RX) {
  607. rt2x00_desc_read(entry_priv->desc, 1, &word);
  608. return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
  609. } else {
  610. rt2x00_desc_read(entry_priv->desc, 1, &word);
  611. return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
  612. }
  613. }
  614. EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
/*
 * Reset a queue entry for reuse. RX entries get their DMA address
 * reprogrammed, DMA_DONE cleared and the RX index register advanced;
 * TX entries simply have DMA_DONE set so the hardware treats the
 * descriptor as consumed.
 */
void rt2800mmio_clear_entry(struct queue_entry *entry)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	u32 word;

	if (entry->queue->qid == QID_RX) {
		rt2x00_desc_read(entry_priv->desc, 0, &word);
		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
		rt2x00_desc_write(entry_priv->desc, 0, word);

		rt2x00_desc_read(entry_priv->desc, 1, &word);
		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
		rt2x00_desc_write(entry_priv->desc, 1, word);

		/*
		 * Set RX IDX in register to inform hardware that we have
		 * handled this entry and it is available for reuse again.
		 */
		rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
					  entry->entry_idx);
	} else {
		rt2x00_desc_read(entry_priv->desc, 1, &word);
		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
		rt2x00_desc_write(entry_priv->desc, 1, word);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
/*
 * Program the DMA ring registers: base address, size and indexes for
 * the four AC tx rings, zero out the unused rings 4 and 5, set up the
 * RX ring, then disable WPDMA and interrupt delays. Always returns 0.
 */
int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct queue_entry_priv_mmio *entry_priv;

	/*
	 * Initialize registers.
	 */
	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
				  rt2x00dev->tx[0].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);

	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
				  rt2x00dev->tx[1].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);

	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
				  rt2x00dev->tx[2].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);

	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
				  rt2x00dev->tx[3].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);

	/* Rings 4 and 5 are not backed by software queues: clear them. */
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);

	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);

	/* RX ring; CRX starts at limit - 1 so the whole ring is
	 * available to the hardware. */
	entry_priv = rt2x00dev->rx->entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
				  rt2x00dev->rx[0].limit);
	rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
				  rt2x00dev->rx[0].limit - 1);
	rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);

	rt2800_disable_wpdma(rt2x00dev);

	rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
/*
 * Early register initialization: reset all DMA indexes, reset the
 * packet buffer, apply PCIe clock workarounds on affected chipsets,
 * power up and reset the MAC/BBP. Always returns 0.
 */
int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/*
	 * Reset DMA indexes
	 */
	rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	/* Reset the packet buffer (NOTE(review): 0xe1f/0xe00 magic values
	 * come from the vendor driver — confirm against datasheet). */
	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	/* PCIe-specific chipsets need the PCIe clock forced on. */
	if (rt2x00_is_pcie(rt2x00dev) &&
	    (rt2x00_rt(rt2x00dev, RT3090) ||
	     rt2x00_rt(rt2x00dev, RT3390) ||
	     rt2x00_rt(rt2x00dev, RT3572) ||
	     rt2x00_rt(rt2x00dev, RT3593) ||
	     rt2x00_rt(rt2x00dev, RT5390) ||
	     rt2x00_rt(rt2x00dev, RT5392) ||
	     rt2x00_rt(rt2x00dev, RT5592))) {
		rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
		rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
		rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
		rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
	}

	rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	/* Pulse the MAC/BBP reset bits, then release the reset. */
	reg = 0;
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
  735. /*
  736. * Device state switch handlers.
  737. */
/*
 * Enable the radio: wait for WPDMA, program the DMA rings, then hand
 * off to the generic rt2800 enable path. Returns 0 on success or a
 * negative errno.
 */
int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	/* Wait for DMA, ignore error until we initialize queues. */
	rt2800_wait_wpdma_ready(rt2x00dev);

	if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
		return -EIO;

	return rt2800_enable_radio(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
  747. MODULE_AUTHOR(DRV_PROJECT);
  748. MODULE_VERSION(DRV_VERSION);
  749. MODULE_DESCRIPTION("rt2800 MMIO library");
  750. MODULE_LICENSE("GPL");