core.c

/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "bus.h"
#include "debug.h"
#include "fwil_types.h"
#include "p2p.h"
#include "cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
#include "feature.h"
#include "proto.h"
#include "pcie.h"
#include "common.h"

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */

/* AMPDU rx reordering definitions */
#define BRCMF_RXREORDER_FLOWID_OFFSET	0
#define BRCMF_RXREORDER_MAXIDX_OFFSET	2
#define BRCMF_RXREORDER_FLAGS_OFFSET	4
#define BRCMF_RXREORDER_CURIDX_OFFSET	6
#define BRCMF_RXREORDER_EXPIDX_OFFSET	8

#define BRCMF_RXREORDER_DEL_FLOW	0x01
#define BRCMF_RXREORDER_FLUSH_ALL	0x02
#define BRCMF_RXREORDER_CURIDX_VALID	0x04
#define BRCMF_RXREORDER_EXPIDX_VALID	0x08
#define BRCMF_RXREORDER_NEW_HOLE	0x10

#define BRCMF_BSSIDX_INVALID		-1

/* Error bits */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable legacy p2p management functionality");

char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
		brcmf_err("ifidx %d out of range\n", ifidx);
		return "<if_bad>";
	}

	if (drvr->iflist[ifidx] == NULL) {
		brcmf_err("null i/f %d\n", ifidx);
		return "<if_null>";
	}

	if (drvr->iflist[ifidx]->ndev)
		return drvr->iflist[ifidx]->ndev->name;

	return "<if_none>";
}

struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx)
{
	struct brcmf_if *ifp;
	s32 bssidx;

	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
		brcmf_err("ifidx %d out of range\n", ifidx);
		return NULL;
	}

	ifp = NULL;
	bssidx = drvr->if2bss[ifidx];
	if (bssidx >= 0)
		ifp = drvr->iflist[bssidx];

	return ifp;
}

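/* Push the current multicast list, allmulti and promiscuous settings from the
 * net_device down to firmware. Runs from a workqueue so the firmware commands
 * are issued outside atomic context.
 */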
static void _brcmf_set_multicast_list(struct work_struct *work)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;
	struct netdev_hw_addr *ha;
	u32 cmd_value, cnt;
	__le32 cnt_le;
	char *buf, *bufp;
	u32 buflen;
	s32 err;

	ifp = container_of(work, struct brcmf_if, multicast_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	ndev = ifp->ndev;

	/* Determine initial value of allmulti flag */
	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

	/* Send down the multicast list first. */
	cnt = netdev_mc_count(ndev);
	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
	buf = kmalloc(buflen, GFP_ATOMIC);
	if (!buf)
		return;
	bufp = buf;

	cnt_le = cpu_to_le32(cnt);
	memcpy(bufp, &cnt_le, sizeof(cnt_le));
	bufp += sizeof(cnt_le);

	netdev_for_each_mc_addr(ha, ndev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETH_ALEN);
		bufp += ETH_ALEN;
		cnt--;
	}

	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
	if (err < 0) {
		brcmf_err("Setting mcast_list failed, %d\n", err);
		cmd_value = cnt ? true : cmd_value;
	}

	kfree(buf);

	/*
	 * Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
	if (err < 0)
		brcmf_err("Setting allmulti failed, %d\n", err);

	/* Finally, pick up the PROMISC flag */
	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
	if (err < 0)
		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
			  err);
}

static void
_brcmf_set_mac_address(struct work_struct *work)
{
	struct brcmf_if *ifp;
	s32 err;

	ifp = container_of(work, struct brcmf_if, setmacaddr_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
				       ETH_ALEN);
	if (err < 0) {
		brcmf_err("Setting cur_etheraddr failed, %d\n", err);
	} else {
		brcmf_dbg(TRACE, "MAC address updated to %pM\n",
			  ifp->mac_addr);
		memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
	}
}

static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct sockaddr *sa = (struct sockaddr *)addr;

	memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
	schedule_work(&ifp->setmacaddr_work);
	return 0;
}

static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	schedule_work(&ifp->multicast_work);
}

static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	int ret;
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct ethhdr *eh;

	brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);

	/* Can the device send data? */
	if (drvr->bus_if->state != BRCMF_BUS_UP) {
		brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	if (!drvr->iflist[ifp->bssidx]) {
		brcmf_err("bad ifidx %d\n", ifp->bssidx);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	/* Make sure there's enough writable headroom */
	ret = skb_cow_head(skb, drvr->hdrlen);
	if (ret < 0) {
		brcmf_err("%s: skb_cow_head failed\n",
			  brcmf_ifname(drvr, ifp->bssidx));
		dev_kfree_skb(skb);
		goto done;
	}

	/* validate length for ether packet */
	if (skb->len < sizeof(*eh)) {
		ret = -EINVAL;
		dev_kfree_skb(skb);
		goto done;
	}

	eh = (struct ethhdr *)(skb->data);

	if (eh->h_proto == htons(ETH_P_PAE))
		atomic_inc(&ifp->pend_8021x_cnt);

	ret = brcmf_fws_process_skb(ifp, skb);

done:
	if (ret) {
		ifp->stats.tx_dropped++;
	} else {
		ifp->stats.tx_packets++;
		ifp->stats.tx_bytes += skb->len;
	}

	/* Return ok: we always eat the packet */
	return NETDEV_TX_OK;
}

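/* Stop or wake the netdev TX queue for @ifp. Stop reasons are tracked as a
 * bitmask so the queue is only woken once all stop reasons have been cleared.
 */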
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}

void brcmf_txflowblock(struct device *dev, bool state)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_fws_bus_blocked(drvr, state);
}

void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
	skb->dev = ifp->ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (skb->pkt_type == PACKET_MULTICAST)
		ifp->stats.multicast++;

	/* Process special event packets */
	brcmf_fweh_process_skb(ifp->drvr, skb);

	if (!(ifp->ndev->flags & IFF_UP)) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	ifp->stats.rx_bytes += skb->len;
	ifp->stats.rx_packets++;

	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
	if (in_interrupt())
		netif_rx(skb);
	else
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
		 */
		netif_rx_ni(skb);
}

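/* Collect the packets parked in the reorder slots from @start up to (but not
 * including) @end onto @skb_list, wrapping at max_idx and clearing each slot.
 * When @start equals @end the entire ring is drained.
 */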
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
					 u8 start, u8 end,
					 struct sk_buff_head *skb_list)
{
	/* initialize return list */
	__skb_queue_head_init(skb_list);

	if (rfi->pend_pkts == 0) {
		brcmf_dbg(INFO, "no packets in reorder queue\n");
		return;
	}

	do {
		if (rfi->pktslots[start]) {
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		if (start > rfi->max_idx)
			start = 0;
	} while (start != end);
	rfi->pend_pkts -= skb_queue_len(skb_list);
}

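/* Handle the AMPDU rx reorder metadata the firmware attached to @pkt.
 * Depending on the flags this creates or deletes a reorder flow, parks the
 * packet in a slot while a hole is outstanding, or flushes in-order packets
 * up to the network stack via brcmf_netif_rx().
 */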
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi + 1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			u8 end_idx;

			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicit window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}

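/* Receive entry point called by the bus layer. Strips the protocol header,
 * then hands the packet to the reorder logic or directly to brcmf_netif_rx().
 */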
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
	struct brcmf_if *ifp;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_skb_reorder_data *rd;
	int ret;

	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);

	/* process and remove protocol-specific header */
	ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp);

	if (ret || !ifp || !ifp->ndev) {
		if (ret != -ENODATA && ifp)
			ifp->stats.rx_errors++;
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	rd = (struct brcmf_skb_reorder_data *)skb->cb;
	if (rd->reorder)
		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
	else
		brcmf_netif_rx(ifp, skb);
}

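/* Finish transmission of @txp: account for completed EAPOL frames, wake any
 * waiter blocked in brcmf_netdev_wait_pend8021x(), update the error stats and
 * free the packet.
 */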
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
{
	struct ethhdr *eh;
	u16 type;

	eh = (struct ethhdr *)(txp->data);
	type = ntohs(eh->h_proto);

	if (type == ETH_P_PAE) {
		atomic_dec(&ifp->pend_8021x_cnt);
		if (waitqueue_active(&ifp->pend_8021x_wait))
			wake_up(&ifp->pend_8021x_wait);
	}

	if (!success)
		ifp->stats.tx_errors++;

	brcmu_pkt_buf_free_skb(txp);
}

void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;

	/* await txstatus signal for firmware if active */
	if (brcmf_fws_fc_active(drvr->fws)) {
		if (!success)
			brcmf_fws_bustxfail(drvr->fws, txp);
	} else {
		if (brcmf_proto_hdrpull(drvr, false, txp, &ifp))
			brcmu_pkt_buf_free_skb(txp);
		else
			brcmf_txfinalize(ifp, txp, success);
	}
}

static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	return &ifp->stats;
}

static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
				      struct ethtool_drvinfo *info)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	char drev[BRCMU_DOTREV_LEN] = "n/a";

	if (drvr->revinfo.result == 0)
		brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, drev, sizeof(info->version));
	strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
	strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};

static int brcmf_netdev_stop(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	brcmf_cfg80211_down(ndev);

	brcmf_net_setcarrier(ifp, false);

	return 0;
}

static int brcmf_netdev_open(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_bus *bus_if = drvr->bus_if;
	u32 toe_ol;

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	/* If bus is not ready, can't continue */
	if (bus_if->state != BRCMF_BUS_UP) {
		brcmf_err("failed: bus is not ready\n");
		return -EAGAIN;
	}

	atomic_set(&ifp->pend_8021x_cnt, 0);

	/* Get current TOE mode from dongle */
	if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
	    && (toe_ol & TOE_TX_CSUM_OL) != 0)
		ndev->features |= NETIF_F_IP_CSUM;
	else
		ndev->features &= ~NETIF_F_IP_CSUM;

	if (brcmf_cfg80211_up(ndev)) {
		brcmf_err("failed to bring up cfg80211\n");
		return -EIO;
	}

	/* Clear carrier; it is set again when connected or in AP mode. */
	netif_carrier_off(ndev);

	return 0;
}

static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};

int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct net_device *ndev;
	s32 err;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	/* set appropriate operations */
	ndev->netdev_ops = &brcmf_netdev_ops_pri;

	ndev->hard_header_len += drvr->hdrlen;
	ndev->ethtool_ops = &brcmf_ethtool_ops;

	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
		     drvr->hdrlen;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

	if (rtnl_locked)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err != 0) {
		brcmf_err("couldn't register the net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
	return 0;

fail:
	drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}

static void brcmf_net_detach(struct net_device *ndev)
{
	if (ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(ndev);
	else
		brcmf_cfg80211_free_netdev(ndev);
}

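/* Reflect the link state on the netdev: block or unblock the TX queue for the
 * DISCONNECTED stop reason and toggle the carrier accordingly.
 */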
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
{
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d carrier=%d\n", ifp->bssidx, on);

	ndev = ifp->ndev;
	brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_DISCONNECTED, !on);
	if (on) {
		if (!netif_carrier_ok(ndev))
			netif_carrier_on(ndev);
	} else {
		if (netif_carrier_ok(ndev))
			netif_carrier_off(ndev);
	}
}

static int brcmf_net_p2p_open(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_up(ndev);
}

static int brcmf_net_p2p_stop(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_down(ndev);
}

static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	if (skb)
		dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};

static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
{
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	ndev->netdev_ops = &brcmf_netdev_ops_p2p;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	if (register_netdev(ndev) != 0) {
		brcmf_err("couldn't register the p2p net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	return 0;

fail:
	ifp->drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}

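/* Allocate a driver interface object for the given bss/if index. For a
 * P2P_DEVICE interface (when legacy p2p management is disabled) only a bare
 * brcmf_if is allocated; otherwise a net_device is allocated as well.
 */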
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      bool is_p2pdev, char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			brcmf_net_detach(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && is_p2pdev) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), is_p2pdev ? "p2p%d" : name,
				    NET_NAME_UNKNOWN, ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ndev->destructor = brcmf_cfg80211_free_netdev;
		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
		/* store mapping ifidx to bssidx */
		if (drvr->if2bss[ifidx] == BRCMF_BSSIDX_INVALID)
			drvr->if2bss[ifidx] = bssidx;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}

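/* Tear down the interface at @bssidx: drop its ifidx mapping, stop and detach
 * the net_device if there is one, or let the p2p module clean up a
 * dynamically created P2P_DEVICE interface.
 */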
static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (drvr->if2bss[ifp->ifidx] == bssidx)
		drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}
		brcmf_net_detach(ifp->ndev);
	} else {
		/* Only p2p device interfaces which get dynamically created
		 * end up here. In this case the p2p module should be informed
		 * about the removal of the interface within the firmware. If
		 * not then p2p commands towards the firmware will cause some
		 * serious troublesome side effects. The p2p module will clean
		 * up the ifp if needed.
		 */
		brcmf_p2p_ifp_removed(ifp);
		kfree(ifp);
	}
}

void brcmf_remove_interface(struct brcmf_if *ifp)
{
	if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bssidx] != ifp))
		return;

	brcmf_dbg(TRACE, "Enter, bssidx=%d, ifidx=%d\n", ifp->bssidx,
		  ifp->ifidx);
	brcmf_fws_del_interface(ifp);
	brcmf_del_if(ifp->drvr, ifp->bssidx);
}

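/* Find the next free bsscfg index. The search starts at 2, leaving indices 0
 * and 1 to the primary and P2P device interfaces. Returns -ENOMEM when no
 * interface slot is available.
 */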
int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
{
	int ifidx;
	int bsscfgidx;
	bool available;
	int highest;

	available = false;
	bsscfgidx = 2;
	highest = 2;
	for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
		if (drvr->iflist[ifidx]) {
			if (drvr->iflist[ifidx]->bssidx == bsscfgidx)
				bsscfgidx = highest + 1;
			else if (drvr->iflist[ifidx]->bssidx > highest)
				highest = drvr->iflist[ifidx]->bssidx;
		} else {
			available = true;
		}
	}

	return available ? bsscfgidx : -ENOMEM;
}

int brcmf_attach(struct device *dev)
{
	struct brcmf_pub *drvr = NULL;
	int ret = 0;
	int i;

	brcmf_dbg(TRACE, "Enter\n");

	/* Allocate primary brcmf_info */
	drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
	if (!drvr)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(drvr->if2bss); i++)
		drvr->if2bss[i] = BRCMF_BSSIDX_INVALID;

	mutex_init(&drvr->proto_block);

	/* Link to bus module */
	drvr->hdrlen = 0;
	drvr->bus_if = dev_get_drvdata(dev);
	drvr->bus_if->drvr = drvr;

	/* attach debug facilities */
	brcmf_debug_attach(drvr);

	/* Attach and link in the protocol */
	ret = brcmf_proto_attach(drvr);
	if (ret != 0) {
		brcmf_err("brcmf_prot_attach failed\n");
		goto fail;
	}

	/* attach firmware event handler */
	brcmf_fweh_attach(drvr);

	return ret;

fail:
	brcmf_detach(dev);

	return ret;
}

static int brcmf_revinfo_read(struct seq_file *s, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(s->private);
	struct brcmf_rev_info *ri = &bus_if->drvr->revinfo;
	char drev[BRCMU_DOTREV_LEN];
	char brev[BRCMU_BOARDREV_LEN];

	seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid);
	seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid);
	seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev));
	seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum);
	seq_printf(s, "chiprev: %u\n", ri->chiprev);
	seq_printf(s, "chippkg: %u\n", ri->chippkg);
	seq_printf(s, "corerev: %u\n", ri->corerev);
	seq_printf(s, "boardid: 0x%04x\n", ri->boardid);
	seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor);
	seq_printf(s, "boardrev: %s\n", brcmu_boardrev_str(ri->boardrev, brev));
	seq_printf(s, "driverrev: %s\n", brcmu_dotrev_str(ri->driverrev, drev));
	seq_printf(s, "ucoderev: %u\n", ri->ucoderev);
	seq_printf(s, "bus: %u\n", ri->bus);
	seq_printf(s, "phytype: %u\n", ri->phytype);
	seq_printf(s, "phyrev: %u\n", ri->phyrev);
	seq_printf(s, "anarev: %u\n", ri->anarev);
	seq_printf(s, "nvramrev: %08x\n", ri->nvramrev);

	return 0;
}

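/* Called by the bus layer once the device is up: create the primary
 * interface, run the preinit firmware commands, attach the feature, fwsignal
 * and cfg80211 modules and register the net devices.
 */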
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	p2p_ifp = NULL;

	/* signal bus ready */
	brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);

	/* assure we have chipid before feature attach */
	if (!bus_if->chip) {
		bus_if->chip = drvr->revinfo.chipnum;
		bus_if->chiprev = drvr->revinfo.chiprev;
		brcmf_dbg(INFO, "firmware revinfo: chip %x (%d) rev %d\n",
			  bus_if->chip, bus_if->chip, bus_if->chiprev);
	}
	brcmf_feat_attach(drvr);

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;

	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev,
					     brcmf_p2p_enable);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = brcmf_net_attach(ifp, false);

	if ((!ret) && (brcmf_p2p_enable)) {
		p2p_ifp = drvr->iflist[1];
		if (p2p_ifp)
			ret = brcmf_net_p2p_attach(p2p_ifp);
	}

fail:
	if (ret < 0) {
		brcmf_err("failed: %d\n", ret);
		if (drvr->config) {
			brcmf_cfg80211_detach(drvr->config);
			drvr->config = NULL;
		}
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		if (ifp)
			brcmf_net_detach(ifp->ndev);
		if (p2p_ifp)
			brcmf_net_detach(p2p_ifp->ndev);
		return ret;
	}
	return 0;
}

void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	if (drvr)
		drvr->hdrlen += len;
}

static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (drvr) {
		/* Stop the bus module */
		brcmf_bus_stop(drvr->bus_if);
	}
}

void brcmf_dev_reset(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	if (drvr == NULL)
		return;

	if (drvr->iflist[0])
		brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
}

void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);
	if (drvr->config)
		brcmf_p2p_detach(&drvr->config->p2p);

	brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS - 1; i > -1; i--)
		brcmf_remove_interface(drvr->iflist[i]);

	brcmf_cfg80211_detach(drvr->config);

	brcmf_fws_deinit(drvr);

	brcmf_bus_detach(drvr);

	brcmf_proto_detach(drvr);

	brcmf_debug_detach(drvr);
	bus_if->drvr = NULL;
	kfree(drvr);
}

s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_if *ifp = bus_if->drvr->iflist[0];

	return brcmf_fil_iovar_data_set(ifp, name, data, len);
}

static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
	return atomic_read(&ifp->pend_8021x_cnt);
}

int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
{
	int err;

	err = wait_event_timeout(ifp->pend_8021x_wait,
				 !brcmf_get_pend_8021x_cnt(ifp),
				 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));

	WARN_ON(!err);

	return !err;
}

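/* Record the new bus state and, when the bus comes up, wake any netdev TX
 * queues that were stopped while it was down.
 */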
void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
{
	struct brcmf_pub *drvr = bus->drvr;
	struct net_device *ndev;
	int ifidx;

	brcmf_dbg(TRACE, "%d -> %d\n", bus->state, state);
	bus->state = state;

	if (state == BRCMF_BUS_UP) {
		for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
			if ((drvr->iflist[ifidx]) &&
			    (drvr->iflist[ifidx]->ndev)) {
				ndev = drvr->iflist[ifidx]->ndev;
				if (netif_queue_stopped(ndev))
					netif_wake_queue(ndev);
			}
		}
	}
}

static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
	brcmf_pcie_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);

static int __init brcmfmac_module_init(void)
{
	brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_init();
#endif
	if (!schedule_work(&brcmf_driver_work))
		return -EBUSY;

	return 0;
}

static void __exit brcmfmac_module_exit(void)
{
	cancel_work_sync(&brcmf_driver_work);
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_exit();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
	brcmf_pcie_exit();
#endif
	brcmf_debugfs_exit();
}

module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);