/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME	"gr_udc"
#define DRIVER_DESC	"Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;
static const char driver_desc[] = DRIVER_DESC;

#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
	((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl)					      \
	((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
	 GR_EPCTRL_BUFSZ_SCALER)
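
/*
 * For illustration: a BUFSZ field value of n in epctrl corresponds to a
 * hardware buffer of n * GR_EPCTRL_BUFSZ_SCALER bytes (the scaler constant
 * comes from gr_udc.h).
 */
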
/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
	static const char *const names[] = {
		[GR_EP0_DISCONNECT] = "disconnect",
		[GR_EP0_SETUP] = "setup",
		[GR_EP0_IDATA] = "idata",
		[GR_EP0_ODATA] = "odata",
		[GR_EP0_ISTATUS] = "istatus",
		[GR_EP0_OSTATUS] = "ostatus",
		[GR_EP0_STALL] = "stall",
		[GR_EP0_SUSPEND] = "suspend",
	};

	if (state < 0 || state >= ARRAY_SIZE(names))
		return "UNKNOWN";

	return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, "  mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, "  halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, "  disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, "  valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, "  dma_start = %d\n", ep->dma_start);
	seq_printf(seq, "  stopped = %d\n", ep->stopped);
	seq_printf(seq, "  wedged = %d\n", ep->wedged);
	seq_printf(seq, "  callback = %d\n", ep->callback);
	seq_printf(seq, "  maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, "  maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
	seq_printf(seq, "  bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	if (mode == 1 || mode == 3)
		seq_printf(seq, "  nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);

	seq_printf(seq, "  Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, "  Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

	if (list_empty(&ep->queue)) {
		seq_puts(seq, "  Queue: empty\n\n");
		return;
	}

	seq_puts(seq, "  Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
			   &req->req.buf, req->req.actual, req->req.length);

		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, "      %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}
	seq_puts(seq, "\n");
}

static int gr_seq_show(struct seq_file *seq, void *v)
{
	struct gr_udc *dev = seq->private;
	u32 control = gr_read32(&dev->regs->control);
	u32 status = gr_read32(&dev->regs->status);
	struct gr_ep *ep;

	seq_printf(seq, "usb state = %s\n",
		   usb_state_string(dev->gadget.state));
	seq_printf(seq, "address = %d\n",
		   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
	seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
	seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
	seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
	seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
	seq_printf(seq, "test_mode = %d\n", dev->test_mode);
	seq_puts(seq, "\n");

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_seq_ep_show(seq, ep);

	return 0;
}

static int gr_dfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, gr_seq_show, inode->i_private);
}

static const struct file_operations gr_dfs_fops = {
	.owner		= THIS_MODULE,
	.open		= gr_dfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void gr_dfs_create(struct gr_udc *dev)
{
	const char *name = "gr_udc_state";

	dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
	dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
					     &gr_dfs_fops);
}
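
/*
 * With debugfs mounted at its usual location, the state file created above
 * appears as /sys/kernel/debug/<device name>/gr_udc_state.
 */
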
static void gr_dfs_delete(struct gr_udc *dev)
{
	/* Handles NULL and ERR pointers internally */
	debugfs_remove(dev->dfs_state);
	debugfs_remove(dev->dfs_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
	dma_addr_t paddr;
	struct gr_dma_desc *dma_desc;

	dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr);
	if (!dma_desc) {
		dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
		return NULL;
	}

	memset(dma_desc, 0, sizeof(*dma_desc));
	dma_desc->paddr = paddr;

	return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
	struct gr_dma_desc *desc;
	struct gr_dma_desc *next;

	next = req->first_desc;
	if (!next)
		return;

	do {
		desc = next;
		next = desc->next_desc;
		gr_free_dma_desc(dev, desc);
	} while (desc != req->last_desc);

	req->first_desc = NULL;
	req->curr_desc = NULL;
	req->last_desc = NULL;
}

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * package handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
		req->req.actual = req->req.length;
	} else if (req->oddlen && req->req.actual > req->evenlen) {
		/*
		 * Copy to user buffer in this case where length was not evenly
		 * divisible by ep->ep.maxpacket and the last descriptor was
		 * actually used.
		 */
		char *buftail = ((char *)req->req.buf + req->evenlen);

		memcpy(buftail, ep->tailbuf, req->oddlen);

		if (req->req.actual > req->req.length) {
			/* We got more data than was requested */
			dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
				ep->ep.name);
			gr_dbgprint_request("OVFL", ep, req);
			req->req.status = -EOVERFLOW;
		}
	}

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non setup packet on ep0in\n");
	} else if (req->req.complete) {
		spin_unlock(&dev->lock);

		usb_gadget_giveback_request(&ep->ep, &req->req);

		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct gr_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	/*
	 * The DMA controller can not handle smaller OUT buffers than
	 * ep->ep.maxpacket. It could lead to buffer overruns if an unexpectedly
	 * long packet is received. Therefore an internal bounce buffer gets
	 * used when such a request gets enabled.
	 */
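	/*
	 * Example: a 100-byte OUT request on an endpoint with 64-byte buffers
	 * is split into one 64-byte descriptor and one odd 36-byte descriptor;
	 * the latter is redirected below to ep->tailbuf, and
	 * gr_finish_request() later copies those 36 bytes back into the
	 * caller's buffer.
	 */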
	if (!ep->is_in && req->oddlen)
		req->last_desc->data = ep->tailbuf_paddr;

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	gr_finish_request(ep, req, status);
	gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
	u32 dmactrl;

	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
			   dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;

	desc = gr_alloc_dma_desc(ep, gfp_flags);
	if (!desc)
		return -ENOMEM;

	desc->data = data;
	if (ep->is_in)
		desc->ctrl =
			(GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
	else
		desc->ctrl = GR_DESC_OUT_CTRL_IE;

	if (!req->first_desc) {
		req->first_desc = desc;
		req->curr_desc = desc;
	} else {
		req->last_desc->next_desc = desc;
		req->last_desc->next = desc->paddr;
		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
	}
	req->last_desc = desc;

	return 0;
}

/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
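/*
 * For illustration: a 150-byte request on an OUT endpoint with
 * ep->bytes_per_buffer == 64 results in three descriptors of 64, 64 and 22
 * bytes. The final, short descriptor makes the loop below record
 * req->evenlen = 128 and req->oddlen = 22, so gr_start_dma() knows to bounce
 * the tail.
 */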
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		if (size < ep->bytes_per_buffer) {
			/* Prepare using bounce buffer */
			req->evenlen = req->req.length - bytes_left;
			req->oddlen = size;
		}

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}

/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
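/*
 * For illustration: a 1000-byte request on an IN bulk endpoint with
 * ep->bytes_per_buffer == 512 becomes two descriptors of 512 and 488 bytes;
 * the hardware then splits each buffer into maxpacket-sized USB packets on
 * the wire.
 */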
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is an even
	 * multiple of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "-ESHUTDOWN");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "-EBUSY");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request");
		return ret;
	}

	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret)
		return ret;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
			       gfp_t gfp_flags)
{
	if (ep->is_in)
		gr_dbgprint_request("RESP", ep, req);

	return gr_queue(ep, req, gfp_flags);
}
/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
	struct gr_request *req;

	ep->stopped = 1;
	ep->dma_start = 0;
	gr_abort_dma(ep);

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct gr_request, queue);
		gr_finish_request(ep, req, -ESHUTDOWN);
	}
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
	u32 epctrl;

	epctrl = gr_read32(&dev->epo[0].regs->epctrl);
	gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
	epctrl = gr_read32(&dev->epi[0].regs->epctrl);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
	u32 epctrl;
	int retval = 0;

	if (ep->num && !ep->ep.desc)
		return -EINVAL;

	if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		return -EOPNOTSUPP;

	/* Never actually halt ep0, and therefore never clear halt for ep0 */
	if (!ep->num) {
		if (halt && !fromhost) {
			/* ep0 halt from gadget - generate protocol stall */
			gr_control_stall(ep->dev);
			dev_dbg(ep->dev->dev, "EP: stall ep0\n");
			return 0;
		}
		return -EINVAL;
	}

	dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
		(halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

	epctrl = gr_read32(&ep->regs->epctrl);
	if (halt) {
		/* Set HALT */
		gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
		ep->stopped = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
		ep->stopped = 0;
		ep->wedged = 0;

		/* Things might have been queued up in the meantime */
		if (!ep->dma_start)
			gr_start_dma(ep);
	}

	return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
	if (dev->ep0state != value)
		dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
			 gr_ep0state_string(value));
	dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	gr_write32(&dev->regs->control, 0);
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
	struct gr_ep *ep;

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_ep_nuke(ep);

	gr_disable_interrupts_and_pullup(dev);

	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
	usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}
/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	u32 control;

	ep = container_of(_ep, struct gr_ep, ep);
	dev = ep->dev;

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	/* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
			  void (*complete)(struct usb_ep *ep,
					   struct usb_request *req))
{
	u8 *reqbuf = dev->ep0reqi->req.buf;
	int status;
	int i;

	for (i = 0; i < length; i++)
		reqbuf[i] = buf[i];
	dev->ep0reqi->req.length = length;
	dev->ep0reqi->req.complete = complete;

	status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
	if (status < 0)
		dev_err(dev->dev,
			"Could not queue ep0in setup response: %d\n", status);

	return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
	__le16 le_response = cpu_to_le16(response);

	return gr_ep0_respond(dev, (u8 *)&le_response, 2,
			      gr_ep0_dummy_complete);
}

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
	return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. First writes the new
 * address to the control register, which is updated internally when the next
 * IN packet is ACKED.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
	u32 control;

	control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
	control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
	control |= GR_CONTROL_SU;
	gr_write32(&dev->regs->control, control);
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
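/*
 * For example, returning 1 below makes gr_ep0_setup() forward the request to
 * the gadget driver's ->setup() callback, while a negative return makes it
 * stall both ep0 directions via gr_control_stall().
 */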
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support TEST_FORCE_EN */
			test = index >> 8;
			if (test >= TEST_J && test <= TEST_PACKET) {
				dev->test_mode = test;
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
				u16 value, u16 index)
{
	if (dev->gadget.state != USB_STATE_CONFIGURED)
		return -1;

	/*
	 * Should return STALL for invalid interfaces, but the UDC driver does
	 * not know anything about that. However, many gadget drivers do not
	 * handle GET_STATUS so we need to take care of that.
	 */
	switch (request) {
	case USB_REQ_GET_STATUS:
		return gr_ep0_respond_u16(dev, 0x0000);

	case USB_REQ_SET_FEATURE:
	case USB_REQ_CLEAR_FEATURE:
		/*
		 * No possible valid standard requests. Still let gadget drivers
		 * have a go at it.
		 */
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
	int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

	if (ret)
		dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
			ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
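/*
 * Typical control-read flow through the state machine below: SETUP (this
 * packet) -> IDATA (the gadget queues its response on ep0in) -> ISTATUS (the
 * host acks with a ZLP on ep0out) -> back to SETUP for the next request.
 */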
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	union {
		struct usb_ctrlrequest ctrl;
		u8 raw[8];
		u32 word[2];
	} u;
	u8 type;
	u8 request;
	u16 value;
	u16 index;
	u16 length;
	int i;
	int status;

	/* Restore from ep0 halt */
	if (dev->ep0state == GR_EP0_STALL) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (!req->req.actual)
			goto out;
	}

	if (dev->ep0state == GR_EP0_ISTATUS) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (req->req.actual > 0)
			dev_dbg(dev->dev,
				"Unexpected setup packet at state %s\n",
				gr_ep0state_string(GR_EP0_ISTATUS));
		else
			goto out; /* Got expected ZLP */
	} else if (dev->ep0state != GR_EP0_SETUP) {
		dev_info(dev->dev,
			 "Unexpected ep0out request at state %s - stalling\n",
			 gr_ep0state_string(dev->ep0state));
		gr_control_stall(dev);
		gr_set_ep0state(dev, GR_EP0_SETUP);
		goto out;
	} else if (!req->req.actual) {
		dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
			gr_ep0state_string(dev->ep0state));
		goto out;
	}

	/* Handle SETUP packet */
	for (i = 0; i < req->req.actual; i++)
		u.raw[i] = ((u8 *)req->req.buf)[i];

	type = u.ctrl.bRequestType;
	request = u.ctrl.bRequest;
	value = le16_to_cpu(u.ctrl.wValue);
	index = le16_to_cpu(u.ctrl.wIndex);
	length = le16_to_cpu(u.ctrl.wLength);

	gr_dbgprint_devreq(dev, type, request, value, index, length);

	/* Check for data stage */
	if (length) {
		if (type & USB_DIR_IN)
			gr_set_ep0state(dev, GR_EP0_IDATA);
		else
			gr_set_ep0state(dev, GR_EP0_ODATA);
	}

	status = 1; /* Positive status flags delegation */
	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (type & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			status = gr_device_request(dev, type, request,
						   value, index);
			break;
		case USB_RECIP_ENDPOINT:
			status = gr_endpoint_request(dev, type, request,
						     value, index);
			break;
		case USB_RECIP_INTERFACE:
			status = gr_interface_request(dev, type, request,
						      value, index);
			break;
		}
	}

	if (status > 0) {
		spin_unlock(&dev->lock);

		dev_vdbg(dev->dev, "DELEGATE\n");
		status = dev->driver->setup(&dev->gadget, &u.ctrl);

		spin_lock(&dev->lock);
	}

	/* Generate STALL on both ep0out and ep0in if requested */
	if (unlikely(status < 0)) {
		dev_vdbg(dev->dev, "STALL\n");
		gr_control_stall(dev);
	}

	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	    request == USB_REQ_SET_CONFIGURATION) {
		if (!value) {
			dev_dbg(dev->dev, "STATUS: deconfigured\n");
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		} else if (status >= 0) {
			/* Not configured unless gadget OK:s it */
			dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
			usb_gadget_set_state(&dev->gadget,
					     USB_STATE_CONFIGURED);
		}
	}

	/* Get ready for next stage */
	if (dev->ep0state == GR_EP0_ODATA)
		gr_set_ep0state(dev, GR_EP0_OSTATUS);
	else if (dev->ep0state == GR_EP0_IDATA)
		gr_set_ep0state(dev, GR_EP0_ISTATUS);
	else
		gr_set_ep0state(dev, GR_EP0_SETUP);

out:
	gr_ep0out_requeue(dev);
}
/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
	u32 control;

	dev->gadget.speed = GR_SPEED(status);
	usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

	/* Turn on full interrupts and pullup */
	control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
		   GR_CONTROL_SP | GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	wmb(); /* Make sure we do not ignore an interrupt */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect */
	if (dev->driver && dev->driver->disconnect) {
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
		/* Short packet or >= expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* Disconnect */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}

/* Non-interrupt context irq handler */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadgets reuse the request that might already be currently
	 * outstanding and needs to be completed (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;

	if (!dev->irq_enabled)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}
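
/*
 * gr_irq() runs in hard interrupt context and only checks whether the device
 * has interrupts enabled; returning IRQ_WAKE_THREAD defers the actual
 * register and queue handling to the threaded handler gr_irq_handler() above.
 */
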
/* ---------------------------------------------------------------------- */
/* USB ep ops */

/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct gr_udc *dev;
	struct gr_ep *ep;
	u8 mode;
	u8 nt;
	u16 max;
	u16 buffer_size = 0;
	u32 epctrl;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* Make sure we are clear for enabling */
	epctrl = gr_read32(&ep->regs->epctrl);
	if (epctrl & GR_EPCTRL_EV)
		return -EBUSY;

	/* Check that directions match */
	if (!ep->is_in != !usb_endpoint_dir_in(desc))
		return -EINVAL;

	/* Check ep num */
	if ((!ep->is_in && ep->num >= dev->nepo) ||
	    (ep->is_in && ep->num >= dev->nepi))
		return -EINVAL;

	if (usb_endpoint_xfer_control(desc)) {
		mode = 0;
	} else if (usb_endpoint_xfer_isoc(desc)) {
		mode = 1;
	} else if (usb_endpoint_xfer_bulk(desc)) {
		mode = 2;
	} else if (usb_endpoint_xfer_int(desc)) {
		mode = 3;
	} else {
		dev_err(dev->dev, "Unknown transfer type for %s\n",
			ep->ep.name);
		return -EINVAL;
	}

	/*
	 * Bits 10-0 set the max payload. 12-11 set the number of
	 * additional transactions.
	 */
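	/*
	 * Example decode: a high-bandwidth isochronous wMaxPacketSize of
	 * 0x1400 yields max = 0x400 (1024 bytes) and nt = 2, i.e. up to
	 * three transactions per microframe.
	 */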
  1282. max = 0x7ff & usb_endpoint_maxp(desc);
  1283. nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
  1284. buffer_size = GR_BUFFER_SIZE(epctrl);
  1285. if (nt && (mode == 0 || mode == 2)) {
  1286. dev_err(dev->dev,
  1287. "%s mode: multiple trans./microframe not valid\n",
  1288. (mode == 2 ? "Bulk" : "Control"));
  1289. return -EINVAL;
  1290. } else if (nt == 0x3) {
  1291. dev_err(dev->dev,
  1292. "Invalid value 0x3 for additional trans./microframe\n");
  1293. return -EINVAL;
  1294. } else if ((nt + 1) * max > buffer_size) {
  1295. dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
  1296. buffer_size, (nt + 1), max);
  1297. return -EINVAL;
  1298. } else if (max == 0) {
  1299. dev_err(dev->dev, "Max payload cannot be set to 0\n");
  1300. return -EINVAL;
  1301. } else if (max > ep->ep.maxpacket_limit) {
  1302. dev_err(dev->dev, "Requested max payload %d > limit %d\n",
  1303. max, ep->ep.maxpacket_limit);
  1304. return -EINVAL;
  1305. }

	spin_lock(&ep->dev->lock);

	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;

	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed in the buffers in the
		 * OUT direction.
		 */
		ep->bytes_per_buffer = max;
	}
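	/*
	 * For example (illustrative numbers only): with a 1024-byte hardware
	 * buffer and max = 300, an IN endpoint gets (1024 / 300) * 300 = 900
	 * bytes per buffer, while an OUT endpoint gets just 300.
	 */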

	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}

/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

	spin_lock_irqsave(&dev->lock, flags);

	gr_ep_nuke(ep);
	gr_ep_reset(ep);
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;
	req = container_of(_req, struct gr_request, req);

	/* Leads to memory leak */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}

/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be
	 * used instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}

/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}

/*
 * Return the total number of bytes currently stored in the internal buffers
 * of the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	epstat = gr_read32(&ep->regs->epstat);
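	/*
	 * The B0/B1 status fields suggest each endpoint is double buffered
	 * in hardware, so both buffer byte counts contribute to the total.
	 */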
	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}

/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);

	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}

static const struct usb_ep_ops gr_ep_ops = {
	.enable		= gr_ep_enable,
	.disable	= gr_ep_disable,

	.alloc_request	= gr_alloc_request,
	.free_request	= gr_free_request,

	.queue		= gr_queue_ext,
	.dequeue	= gr_dequeue,

	.set_halt	= gr_set_halt,
	.set_wedge	= gr_set_wedge,
	.fifo_status	= gr_fifo_status,
	.fifo_flush	= gr_fifo_flush,
};

/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}

static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;

	spin_lock(&dev->lock);

	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup		= gr_wakeup,
	.pullup		= gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};

/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};

/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!_req || !buf) {
			/* possible _req freed by gr_probe via gr_remove */
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;

		ep->ep.caps.type_control = true;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	if (is_in)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;
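
	/*
	 * Per-endpoint scratch buffer, one maxpacket in size; presumably
	 * used by the DMA code for packet tails that must not spill past
	 * the end of a request buffer.
	 */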
	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}

/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
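
	/*
	 * Buffer sizes can be given per endpoint through the optional
	 * "epobufsizes" and "epibufsizes" device tree properties; endpoints
	 * without an entry fall back to 1024 bytes.
	 */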
	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}

static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
{
	struct gr_ep *ep;

	if (is_in)
		ep = &dev->epi[num];
	else
		ep = &dev->epo[num];

	if (ep->tailbuf)
		dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
				  ep->tailbuf, ep->tailbuf_paddr);
}

static int gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);

	return 0;
}
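
/*
 * gr_irq runs in hard interrupt context and only checks whether the core
 * has interrupts enabled; the actual event handling happens in the threaded
 * handler gr_irq_handler, woken via IRQ_WAKE_THREAD.
 */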
static int gr_request_irq(struct gr_udc *dev, int irq)
{
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}

static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct resource *res;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq <= 0) {
		dev_err(dev->dev, "No irq found\n");
		return -ENODEV;
	}

	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo <= 0) {
			dev_err(dev->dev, "Found irqi but not irqo\n");
			return -ENODEV;
		}
	} else {
		dev->irqi = 0;
	}

	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool");
		return -ENOMEM;
	}

	spin_lock(&dev->lock);

	/* Inside lock so that no gadget can use this udc until probe is done */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc");
		goto out;
	}
	dev->added = 1;

	retval = gr_udc_init(dev);
	if (retval)
		goto out;

	gr_dfs_create(dev);

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	spin_unlock(&dev->lock);

	if (retval)
		gr_remove(pdev);

	return retval;
}
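
/*
 * Matched by node name; "GAISLER_USBDC" and "01_021" (hex vendor/device id)
 * appear to be the two forms the node name takes in GRLIB/LEON device trees.
 */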
static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");