/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
	Module: rt2x00usb
	Abstract: rt2x00 generic usb device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"

/*
 * Interfacing with the HW.
 */
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
			     const u8 request, const u8 requesttype,
			     const u16 offset, const u16 value,
			     void *buffer, const u16 buffer_length,
			     const int timeout)
{
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	int status;
	unsigned int pipe =
	    (requesttype == USB_VENDOR_REQUEST_IN) ?
	    usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
	unsigned long expire = jiffies + msecs_to_jiffies(timeout);

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return -ENODEV;

	do {
		status = usb_control_msg(usb_dev, pipe, request, requesttype,
					 value, offset, buffer, buffer_length,
					 timeout / 2);
		if (status >= 0)
			return 0;

		if (status == -ENODEV) {
			/* Device has disappeared. */
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
			break;
		}
	} while (time_before(jiffies, expire));

	rt2x00_err(rt2x00dev,
		   "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
		   request, offset, status);
	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);
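
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * rt2x00usb_vendor_request() retries until the timeout expires and
 * requires a DMA-safe (kmalloc'ed or NULL) buffer. A buffer-less command
 * write, as the rt2x00usb_vendor_request_sw() wrapper in rt2x00usb.h
 * issues it, could look like this (USB_DEVICE_MODE and USB_MODE_SLEEP
 * are vendor-command names from rt2x00usb.h):
 *
 *	rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
 *				 USB_VENDOR_REQUEST_OUT, 0, USB_MODE_SLEEP,
 *				 NULL, 0, REGISTER_TIMEOUT);
 */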

int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
				   const u8 request, const u8 requesttype,
				   const u16 offset, void *buffer,
				   const u16 buffer_length, const int timeout)
{
	int status;

	BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex));

	/*
	 * Check for cache availability.
	 */
	if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
		rt2x00_err(rt2x00dev, "CSR cache not available\n");
		return -ENOMEM;
	}

	if (requesttype == USB_VENDOR_REQUEST_OUT)
		memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

	status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
					  offset, 0, rt2x00dev->csr.cache,
					  buffer_length, timeout);

	if (!status && requesttype == USB_VENDOR_REQUEST_IN)
		memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);
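
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the _lock variant bounces the transfer through the DMA-safe csr.cache
 * and must be called with csr_mutex held, as the BUG_ON above enforces:
 *
 *	__le32 reg;
 *
 *	mutex_lock(&rt2x00dev->csr_mutex);
 *	rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
 *				       USB_VENDOR_REQUEST_IN, offset,
 *				       &reg, sizeof(reg), REGISTER_TIMEOUT);
 *	mutex_unlock(&rt2x00dev->csr_mutex);
 *
 * Grouping several accesses under one lock/unlock pair keeps multi-word
 * updates atomic with respect to other register accesses.
 */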

int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
				  const u8 request, const u8 requesttype,
				  const u16 offset, void *buffer,
				  const u16 buffer_length)
{
	int status = 0;
	unsigned char *tb;
	u16 off, len, bsize;

	mutex_lock(&rt2x00dev->csr_mutex);

	tb  = (unsigned char *)buffer;
	off = offset;
	len = buffer_length;
	while (len && !status) {
		bsize = min_t(u16, CSR_CACHE_SIZE, len);
		status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
							requesttype, off, tb,
							bsize, REGISTER_TIMEOUT);

		tb  += bsize;
		len -= bsize;
		off += bsize;
	}

	mutex_unlock(&rt2x00dev->csr_mutex);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
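
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * this helper splits arbitrarily large transfers into CSR_CACHE_SIZE
 * chunks, so it also covers bulk transfers such as fetching the EEPROM
 * image. It is the basis of the rt2x00usb_register_read()/..._write()
 * inlines in rt2x00usb.h; a single 32-bit register read reduces to:
 *
 *	__le32 reg;
 *
 *	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
 *				      USB_VENDOR_REQUEST_IN, offset,
 *				      &reg, sizeof(reg));
 *	value = le32_to_cpu(reg);
 */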

int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
			   const unsigned int offset,
			   const struct rt2x00_field32 field,
			   u32 *reg)
{
	unsigned int i;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return -ENODEV;

	for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
		rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
		   offset, *reg);
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);
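
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * callers poll an indirect-access register until its busy bit clears
 * before issuing the next command. The return value is 1 once the
 * interface is idle, 0 on timeout (with *reg forced to ~0), and -ENODEV
 * when the device is gone. BBPCSR/BBPCSR_BUSY below are hypothetical
 * chipset-specific names:
 *
 *	u32 reg;
 *
 *	if (!rt2x00usb_regbusy_read(rt2x00dev, BBPCSR, BBPCSR_BUSY, &reg))
 *		return -EBUSY;		// timed out, reg is now ~0
 *	// idle: safe to program the next indirect command
 */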

struct rt2x00_async_read_data {
	__le32 reg;
	struct usb_ctrlrequest cr;
	struct rt2x00_dev *rt2x00dev;
	bool (*callback)(struct rt2x00_dev *, int, u32);
};

static void rt2x00usb_register_read_async_cb(struct urb *urb)
{
	struct rt2x00_async_read_data *rd = urb->context;

	if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
		if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
			kfree(rd);
	} else
		kfree(rd);
}

void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
				   const unsigned int offset,
				   bool (*callback)(struct rt2x00_dev*, int, u32))
{
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct urb *urb;
	struct rt2x00_async_read_data *rd;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (!rd)
		return;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		kfree(rd);
		return;
	}

	rd->rt2x00dev = rt2x00dev;
	rd->callback = callback;
	rd->cr.bRequestType = USB_VENDOR_REQUEST_IN;
	rd->cr.bRequest = USB_MULTI_READ;
	rd->cr.wValue = 0;
	rd->cr.wIndex = cpu_to_le16(offset);
	rd->cr.wLength = cpu_to_le16(sizeof(u32));

	usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			     (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
			     rt2x00usb_register_read_async_cb, rd);
	if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
		kfree(rd);
	usb_free_urb(urb);
}
EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);
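
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the callback runs in URB completion context; returning true resubmits
 * the same URB for another read of the register, returning false frees
 * the request. A one-shot reader of a hypothetical STATUS_REG:
 *
 *	static bool example_status_cb(struct rt2x00_dev *rt2x00dev,
 *				      int urb_status, u32 reg)
 *	{
 *		if (!urb_status)
 *			handle_status(rt2x00dev, reg);
 *		return false;	// one-shot: do not resubmit
 *	}
 *
 *	rt2x00usb_register_read_async(rt2x00dev, STATUS_REG,
 *				      example_status_cb);
 */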

/*
 * TX data handlers.
 */
static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)
{
	/*
	 * If the transfer to hardware succeeded, it does not mean the
	 * frame was sent out correctly. It only means the frame
	 * was successfully pushed to the hardware; we have no
	 * way to determine the transmission status right now.
	 * (Only indirectly by looking at the failed TX counters
	 * in the register).
	 */
	if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
		rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
	else
		rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
}

static void rt2x00usb_work_txdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, txdone_work);
	struct data_queue *queue;
	struct queue_entry *entry;

	tx_queue_for_each(rt2x00dev, queue) {
		while (!rt2x00queue_empty(queue)) {
			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);

			if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
			    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
				break;

			rt2x00usb_work_txdone_entry(entry);
		}
	}
}

static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Check if the frame was correctly uploaded.
	 */
	if (urb->status)
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

	/*
	 * Report the frame as DMA done.
	 */
	rt2x00lib_dmadone(entry);

	if (rt2x00dev->ops->lib->tx_dma_done)
		rt2x00dev->ops->lib->tx_dma_done(entry);

	/*
	 * Schedule the delayed work for reading the TX status
	 * from the device.
	 */
	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO) ||
	    !kfifo_is_empty(&rt2x00dev->txstatus_fifo))
		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}

static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	u32 length;
	int status;

	if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	/*
	 * USB devices require certain padding at the end of each frame
	 * and urb. Those paddings are not included in skbs. Pass entry
	 * to the driver to determine what the overall length should be.
	 */
	length = rt2x00dev->ops->lib->get_tx_data_len(entry);

	status = skb_padto(entry->skb, length);
	if (unlikely(status)) {
		/* TODO: report something more appropriate than IO_FAILED. */
		rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n");
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);

		return false;
	}

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, length,
			  rt2x00usb_interrupt_txdone, entry);

	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		if (status == -ENODEV)
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}

/*
 * RX data handlers.
 */
static void rt2x00usb_work_rxdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, rxdone_work);
	struct queue_entry *entry;
	struct skb_frame_desc *skbdesc;
	u8 rxd[32];

	while (!rt2x00queue_empty(rt2x00dev->rx)) {
		entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);

		if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
		    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
			break;

		/*
		 * Fill in desc fields of the skb descriptor.
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = rxd;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, GFP_KERNEL);
	}
}

static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Report the frame as DMA done.
	 */
	rt2x00lib_dmadone(entry);

	/*
	 * Check if the received data is simply too small
	 * to be actually valid, or if the urb is signaling
	 * a problem.
	 */
	if (urb->actual_length < entry->queue->desc_size || urb->status)
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

	/*
	 * Schedule the delayed work for reading the RX status
	 * from the device.
	 */
	queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}

static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	int status;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	rt2x00lib_dmastart(entry);

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, entry->skb->len,
			  rt2x00usb_interrupt_rxdone, entry);

	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		if (status == -ENODEV)
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}

void rt2x00usb_kick_queue(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		if (!rt2x00queue_empty(queue))
			rt2x00queue_for_each_entry(queue,
						   Q_INDEX_DONE,
						   Q_INDEX,
						   NULL,
						   rt2x00usb_kick_tx_entry);
		break;
	case QID_RX:
		if (!rt2x00queue_full(queue))
			rt2x00queue_for_each_entry(queue,
						   Q_INDEX,
						   Q_INDEX_DONE,
						   NULL,
						   rt2x00usb_kick_rx_entry);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);

static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;

	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return false;

	usb_kill_urb(entry_priv->urb);

	/*
	 * Kill guardian urb (if required by driver).
	 */
	if ((entry->queue->qid == QID_BEACON) &&
	    (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD)))
		usb_kill_urb(bcn_priv->guardian_urb);

	return false;
}

void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
{
	struct work_struct *completion;
	unsigned int i;

	if (drop)
		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
					   rt2x00usb_flush_entry);

	/*
	 * Obtain the queue completion handler.
	 */
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		completion = &queue->rt2x00dev->txdone_work;
		break;
	case QID_RX:
		completion = &queue->rt2x00dev->rxdone_work;
		break;
	default:
		return;
	}

	for (i = 0; i < 10; i++) {
		/*
		 * Check if the driver is already done, otherwise we
		 * have to sleep a little while to give the driver/hw
		 * the opportunity to complete the interrupt processing itself.
		 */
		if (rt2x00queue_empty(queue))
			break;

		/*
		 * Schedule the completion handler manually; when this
		 * worker function runs, it should clean up the queue.
		 */
		queue_work(queue->rt2x00dev->workqueue, completion);

		/*
		 * Wait for a little while to give the driver
		 * the opportunity to recover itself.
		 */
		msleep(10);
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);

static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
	rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
		    queue->qid);

	rt2x00queue_stop_queue(queue);
	rt2x00queue_flush_queue(queue, true);
	rt2x00queue_start_queue(queue);
}

static int rt2x00usb_dma_timeout(struct data_queue *queue)
{
	struct queue_entry *entry;

	entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
	return rt2x00queue_dma_timeout(entry);
}

void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue) {
		if (!rt2x00queue_empty(queue)) {
			if (rt2x00usb_dma_timeout(queue))
				rt2x00usb_watchdog_tx_dma(queue);
		}
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);

/*
 * Radio handlers.
 */
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
				    REGISTER_TIMEOUT);
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

/*
 * Device initialization handlers.
 */
void rt2x00usb_clear_entry(struct queue_entry *entry)
{
	entry->flags = 0;

	if (entry->queue->qid == QID_RX)
		rt2x00usb_kick_rx_entry(entry, NULL);
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);

static void rt2x00usb_assign_endpoint(struct data_queue *queue,
				      struct usb_endpoint_descriptor *ep_desc)
{
	struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev);
	int pipe;

	queue->usb_endpoint = usb_endpoint_num(ep_desc);

	if (queue->qid == QID_RX) {
		pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
		queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
	} else {
		pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
		queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
	}

	if (!queue->usb_maxpacket)
		queue->usb_maxpacket = 1;
}

static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
{
	struct usb_interface *intf = to_usb_interface(rt2x00dev->dev);
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	struct data_queue *queue = rt2x00dev->tx;
	struct usb_endpoint_descriptor *tx_ep_desc = NULL;
	unsigned int i;

	/*
	 * Walk through all available endpoints to search for "bulk in"
	 * and "bulk out" endpoints. When we find such endpoints, collect
	 * the information we need from the descriptor and assign it
	 * to the queue.
	 */
	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc)) {
			rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   (queue != queue_end(rt2x00dev))) {
			rt2x00usb_assign_endpoint(queue, ep_desc);
			queue = queue_next(queue);

			tx_ep_desc = ep_desc;
		}
	}

	/*
	 * At least 1 endpoint for RX and 1 endpoint for TX must be available.
	 */
	if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
		rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
		return -EPIPE;
	}

	/*
	 * It is possible that not all queues have a dedicated endpoint.
	 * Loop through all TX queues and copy the endpoint information
	 * which we have gathered from already assigned endpoints.
	 */
	txall_queue_for_each(rt2x00dev, queue) {
		if (!queue->usb_endpoint)
			rt2x00usb_assign_endpoint(queue, tx_ep_desc);
	}

	return 0;
}

static int rt2x00usb_alloc_entries(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!entry_priv->urb)
			return -ENOMEM;
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (queue->qid != QID_BEACON ||
	    !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
		return 0;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!bcn_priv->guardian_urb)
			return -ENOMEM;
	}

	return 0;
}

static void rt2x00usb_free_entries(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		usb_kill_urb(entry_priv->urb);
		usb_free_urb(entry_priv->urb);
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (queue->qid != QID_BEACON ||
	    !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
		return;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		usb_kill_urb(bcn_priv->guardian_urb);
		usb_free_urb(bcn_priv->guardian_urb);
	}
}

int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	/*
	 * Find endpoints for each queue.
	 */
	status = rt2x00usb_find_endpoints(rt2x00dev);
	if (status)
		goto exit;

	/*
	 * Allocate DMA.
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00usb_alloc_entries(queue);
		if (status)
			goto exit;
	}

	return 0;

exit:
	rt2x00usb_uninitialize(rt2x00dev);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);

void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	queue_for_each(rt2x00dev, queue)
		rt2x00usb_free_entries(queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);

/*
 * USB driver handlers.
 */
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	kfree(rt2x00dev->csr.cache);
	rt2x00dev->csr.cache = NULL;
}

static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
	if (!rt2x00dev->csr.cache)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	rt2x00_probe_err("Failed to allocate registers\n");
	rt2x00usb_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00usb_probe(struct usb_interface *usb_intf,
		    const struct rt2x00_ops *ops)
{
	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	usb_dev = usb_get_dev(usb_dev);
	usb_reset_device(usb_dev);

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		rt2x00_probe_err("Failed to allocate hardware\n");
		retval = -ENOMEM;
		goto exit_put_device;
	}

	usb_set_intfdata(usb_intf, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = &usb_intf->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);

	INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
	INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
	hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);

	retval = rt2x00usb_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_put_device:
	usb_put_dev(usb_dev);

	usb_set_intfdata(usb_intf, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);

void rt2x00usb_disconnect(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00usb_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the USB device data.
	 */
	usb_set_intfdata(usb_intf, NULL);
	usb_put_dev(interface_to_usbdev(usb_intf));
}
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);

#ifdef CONFIG_PM
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_suspend(rt2x00dev, state);
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);

int rt2x00usb_resume(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");