/* pcie.c */
  1. /* Copyright (c) 2014 Broadcom Corporation
  2. *
  3. * Permission to use, copy, modify, and/or distribute this software for any
  4. * purpose with or without fee is hereby granted, provided that the above
  5. * copyright notice and this permission notice appear in all copies.
  6. *
  7. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  8. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  9. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  10. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  11. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  12. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  13. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  14. */
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/firmware.h>
  18. #include <linux/pci.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/delay.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/bcma/bcma.h>
  23. #include <linux/sched.h>
  24. #include <asm/unaligned.h>
  25. #include <soc.h>
  26. #include <chipcommon.h>
  27. #include <brcmu_utils.h>
  28. #include <brcmu_wifi.h>
  29. #include <brcm_hw_ids.h>
  30. #include "debug.h"
  31. #include "bus.h"
  32. #include "commonring.h"
  33. #include "msgbuf.h"
  34. #include "pcie.h"
  35. #include "firmware.h"
  36. #include "chip.h"
/* Link state of the PCIe bus as tracked by this driver:
 * DOWN until firmware is loaded and the device signals readiness, UP after.
 */
enum brcmf_pcie_state {
	BRCMFMAC_PCIE_STATE_DOWN,
	BRCMFMAC_PCIE_STATE_UP
};
/* Firmware and NVRAM image names per supported chip */
#define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin"
#define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt"
#define BRCMF_PCIE_4350_FW_NAME "brcm/brcmfmac4350-pcie.bin"
#define BRCMF_PCIE_4350_NVRAM_NAME "brcm/brcmfmac4350-pcie.txt"
#define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin"
#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
#define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt"
#define BRCMF_PCIE_4358_FW_NAME "brcm/brcmfmac4358-pcie.bin"
#define BRCMF_PCIE_4358_NVRAM_NAME "brcm/brcmfmac4358-pcie.txt"
#define BRCMF_PCIE_4365_FW_NAME "brcm/brcmfmac4365b-pcie.bin"
#define BRCMF_PCIE_4365_NVRAM_NAME "brcm/brcmfmac4365b-pcie.txt"
#define BRCMF_PCIE_4366_FW_NAME "brcm/brcmfmac4366b-pcie.bin"
#define BRCMF_PCIE_4366_NVRAM_NAME "brcm/brcmfmac4366b-pcie.txt"
#define BRCMF_PCIE_4371_FW_NAME "brcm/brcmfmac4371-pcie.bin"
#define BRCMF_PCIE_4371_NVRAM_NAME "brcm/brcmfmac4371-pcie.txt"

#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */

/* BAR1 (TCM) and BAR0 (register) mapping sizes */
#define BRCMF_PCIE_TCM_MAP_SIZE (4096 * 1024)
#define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)

/* backplane address space accessed by BAR0 */
#define BRCMF_PCIE_BAR0_WINDOW 0x80
#define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
#define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
#define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000

/* ARM CR4 core registers, offsets within the selected core window */
#define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
#define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C

/* PCI config-space registers used with the gen1 (PCIE) core */
#define BRCMF_PCIE_REG_INTSTATUS 0x90
#define BRCMF_PCIE_REG_INTMASK 0x94
#define BRCMF_PCIE_REG_SBMBX 0x98
#define BRCMF_PCIE_REG_LINK_STATUS_CTRL 0xBC

/* PCIE2 (gen2) core registers, accessed through the BAR0 regs mapping */
#define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
#define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
#define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
#define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140

/* PCIe core generation, selects which interrupt scheme is used */
#define BRCMF_PCIE_GENREV1 1
#define BRCMF_PCIE_GENREV2 2

/* gen2 legacy interrupt lines */
#define BRCMF_PCIE2_INTA 0x01
#define BRCMF_PCIE2_INTB 0x02

/* gen1 interrupt status/mask bits */
#define BRCMF_PCIE_INT_0 0x01
#define BRCMF_PCIE_INT_1 0x02
#define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
BRCMF_PCIE_INT_1)

/* gen2 mailbox interrupt bits: function-0 events and the
 * device-to-host doorbells (4 doorbell pairs)
 */
#define BRCMF_PCIE_MB_INT_FN0_0 0x0100
#define BRCMF_PCIE_MB_INT_FN0_1 0x0200
#define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
#define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
#define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
#define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
#define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
#define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
#define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
#define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
BRCMF_PCIE_MB_INT_D2H0_DB1 | \
BRCMF_PCIE_MB_INT_D2H1_DB0 | \
BRCMF_PCIE_MB_INT_D2H1_DB1 | \
BRCMF_PCIE_MB_INT_D2H2_DB0 | \
BRCMF_PCIE_MB_INT_D2H2_DB1 | \
BRCMF_PCIE_MB_INT_D2H3_DB0 | \
BRCMF_PCIE_MB_INT_D2H3_DB1)

/* host/firmware shared-structure protocol version and feature flags */
#define BRCMF_PCIE_MIN_SHARED_VERSION 5
#define BRCMF_PCIE_MAX_SHARED_VERSION 5
#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
#define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000

/* byte offsets into the shared structure in device TCM */
#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
#define BRCMF_SHARED_RING_BASE_OFFSET 52
#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68

/* byte offsets into the ring info structure */
#define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
#define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
#define BRCMF_RING_H2D_RING_MEM_OFFSET 4
#define BRCMF_RING_H2D_RING_STATE_OFFSET 8

/* byte offsets into a ring memory descriptor */
#define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
#define BRCMF_RING_MAX_ITEM_OFFSET 4
#define BRCMF_RING_LEN_ITEMS_OFFSET 6
#define BRCMF_RING_MEM_SZ 16
#define BRCMF_RING_STATE_SZ 8

/* ring read/write index pointer offsets in the shared structure */
#define BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET 4
#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20
#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28
#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36
#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44
#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
#define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52

#define BRCMF_DEF_MAX_RXBUFPOST 255

/* firmware console layout in device TCM */
#define BRCMF_CONSOLE_BUFADDR_OFFSET 8
#define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
#define BRCMF_CONSOLE_WRITEIDX_OFFSET 16

#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024

/* device-to-host mailbox data bits */
#define BRCMF_D2H_DEV_D3_ACK 0x00000001
#define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
#define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004

/* host-to-device mailbox data bits */
#define BRCMF_H2D_HOST_D3_INFORM 0x00000001
#define BRCMF_H2D_HOST_DS_ACK 0x00000002
#define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
#define BRCMF_H2D_HOST_D0_INFORM 0x00000010

#define BRCMF_PCIE_MBDATA_TIMEOUT 2000

/* config-space registers saved/restored around device reset */
#define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
#define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
#define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
#define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
#define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
#define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
#define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3

MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4350_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4350_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4365_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4365_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4366_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4366_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4371_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4371_NVRAM_NAME);
/* State for reading the firmware's in-device console ring buffer.
 * Addresses are device (TCM) addresses read from the shared structure.
 */
struct brcmf_pcie_console {
	u32 base_addr;		/* console structure address in TCM */
	u32 buf_addr;		/* console character buffer address in TCM */
	u32 bufsize;		/* size of the character buffer */
	u32 read_idx;		/* host read position within the buffer */
	u8 log_str[256];	/* line assembly buffer for pr_debug output */
	u8 log_idx;		/* current fill level of log_str */
};
/* Host-side copy of the structure the firmware shares through TCM:
 * ring locations, mailbox addresses and DMA scratch/update buffers.
 */
struct brcmf_pcie_shared_info {
	u32 tcm_base_address;	/* TCM address of the shared structure */
	u32 flags;		/* BRCMF_PCIE_SHARED_*/BRCMF_PCIE_FLAGS_* bits */
	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
	struct brcmf_pcie_ringbuf *flowrings;
	u16 max_rxbufpost;
	u32 nrof_flowrings;
	u32 rx_dataoffset;
	u32 htod_mb_data_addr;	/* TCM address of host-to-device mb data */
	u32 dtoh_mb_data_addr;	/* TCM address of device-to-host mb data */
	u32 ring_info_addr;	/* TCM address of the ring info structure */
	struct brcmf_pcie_console console;
	void *scratch;		/* D2H scratch buffer (DMA coherent) */
	dma_addr_t scratch_dmahandle;
	void *ringupd;		/* D2H ring update buffer (DMA coherent) */
	dma_addr_t ringupd_dmahandle;
};
/* Backplane addresses of a chip core: register base and DMP wrapper base. */
struct brcmf_pcie_core_info {
	u32 base;
	u32 wrapbase;
};
/* Per-device state of the PCIe bus layer. One instance per probed device. */
struct brcmf_pciedev_info {
	enum brcmf_pcie_state state;
	bool in_irq;		/* set while the threaded ISR is running */
	bool irq_requested;
	struct pci_dev *pdev;
	char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
	char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
	void __iomem *regs;	/* BAR0: device register window */
	void __iomem *tcm;	/* BAR1: device tightly-coupled memory */
	u32 tcm_size;
	u32 ram_base;
	u32 ram_size;
	struct brcmf_chip *ci;
	u32 coreid;
	u32 generic_corerev;	/* BRCMF_PCIE_GENREV1 or _GENREV2 */
	struct brcmf_pcie_shared_info shared;
	/* doorbell implementation, selected per PCIe core generation */
	void (*ringbell)(struct brcmf_pciedev_info *devinfo);
	wait_queue_head_t mbdata_resp_wait;
	bool mbdata_completed;
	bool irq_allocated;
	bool wowl_enabled;
	u8 dma_idx_sz;		/* index size when DMA index support is used */
	void *idxbuf;		/* host memory ring index buffer (DMA) */
	u32 idxbuf_sz;
	dma_addr_t idxbuf_dmahandle;
	/* ring index accessors: either TCM-based or idxbuf-based */
	u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
	void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
			  u16 value);
};
/* A single message ring: common ring state plus the TCM/idxbuf offsets of
 * its read and write indices. Must embed commonring as the first member
 * (container_of-style use by the commonring layer is presumed — verify).
 */
struct brcmf_pcie_ringbuf {
	struct brcmf_commonring commonring;
	dma_addr_t dma_handle;
	u32 w_idx_addr;
	u32 r_idx_addr;
	struct brcmf_pciedev_info *devinfo;
	u8 id;
};
/* Per-common-ring maximum number of items; indexed by ring id
 * (control submit, rxpost submit, control complete, tx complete,
 * rx complete).
 */
static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
};

/* Per-common-ring item size in bytes; same index order as above. */
static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
/* Thin MMIO accessors. regs is the BAR0 register window, tcm the BAR1
 * device memory window. The *_idx variants read/write ring indices kept
 * in host memory (devinfo->idxbuf) instead of TCM; note the pointer
 * arithmetic on the void* idxbuf relies on the GCC void*-arith extension.
 */

/* Read a 32-bit device register at reg_offset within BAR0. */
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
{
	void __iomem *address = devinfo->regs + reg_offset;

	return (ioread32(address));
}

/* Write a 32-bit device register at reg_offset within BAR0. */
static void
brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
		       u32 value)
{
	void __iomem *address = devinfo->regs + reg_offset;

	iowrite32(value, address);
}

/* Read one byte from device TCM. */
static u8
brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
	void __iomem *address = devinfo->tcm + mem_offset;

	return (ioread8(address));
}

/* Read a 16-bit word from device TCM. */
static u16
brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
	void __iomem *address = devinfo->tcm + mem_offset;

	return (ioread16(address));
}

/* Write a 16-bit word to device TCM. */
static void
brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
		       u16 value)
{
	void __iomem *address = devinfo->tcm + mem_offset;

	iowrite16(value, address);
}

/* Read a ring index from the host-memory index buffer. */
static u16
brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
	u16 *address = devinfo->idxbuf + mem_offset;

	return (*(address));
}

/* Write a ring index to the host-memory index buffer. */
static void
brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
		     u16 value)
{
	u16 *address = devinfo->idxbuf + mem_offset;

	*(address) = value;
}

/* Read a 32-bit word from device TCM. */
static u32
brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
	void __iomem *address = devinfo->tcm + mem_offset;

	return (ioread32(address));
}

/* Write a 32-bit word to device TCM. */
static void
brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
		       u32 value)
{
	void __iomem *address = devinfo->tcm + mem_offset;

	iowrite32(value, address);
}

/* Read a 32-bit word from device RAM (TCM offset by the chip's rambase). */
static u32
brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;

	return (ioread32(addr));
}

/* Write a 32-bit word to device RAM (TCM offset by the chip's rambase). */
static void
brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
		       u32 value)
{
	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;

	iowrite32(value, addr);
}
  335. static void
  336. brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  337. void *srcaddr, u32 len)
  338. {
  339. void __iomem *address = devinfo->tcm + mem_offset;
  340. __le32 *src32;
  341. __le16 *src16;
  342. u8 *src8;
  343. if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
  344. if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
  345. src8 = (u8 *)srcaddr;
  346. while (len) {
  347. iowrite8(*src8, address);
  348. address++;
  349. src8++;
  350. len--;
  351. }
  352. } else {
  353. len = len / 2;
  354. src16 = (__le16 *)srcaddr;
  355. while (len) {
  356. iowrite16(le16_to_cpu(*src16), address);
  357. address += 2;
  358. src16++;
  359. len--;
  360. }
  361. }
  362. } else {
  363. len = len / 4;
  364. src32 = (__le32 *)srcaddr;
  365. while (len) {
  366. iowrite32(le32_to_cpu(*src32), address);
  367. address += 4;
  368. src32++;
  369. len--;
  370. }
  371. }
  372. }
  373. static void
  374. brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
  375. void *dstaddr, u32 len)
  376. {
  377. void __iomem *address = devinfo->tcm + mem_offset;
  378. __le32 *dst32;
  379. __le16 *dst16;
  380. u8 *dst8;
  381. if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
  382. if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
  383. dst8 = (u8 *)dstaddr;
  384. while (len) {
  385. *dst8 = ioread8(address);
  386. address++;
  387. dst8++;
  388. len--;
  389. }
  390. } else {
  391. len = len / 2;
  392. dst16 = (__le16 *)dstaddr;
  393. while (len) {
  394. *dst16 = cpu_to_le16(ioread16(address));
  395. address += 2;
  396. dst16++;
  397. len--;
  398. }
  399. }
  400. } else {
  401. len = len / 4;
  402. dst32 = (__le32 *)dstaddr;
  403. while (len) {
  404. *dst32 = cpu_to_le32(ioread32(address));
  405. address += 4;
  406. dst32++;
  407. len--;
  408. }
  409. }
  410. }
/* Write a chipcommon register (by name) through the BAR0 window;
 * requires the chipcommon core to be currently selected.
 */
#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
							      CHIPCREGOFFS(reg), value)
/* Point the BAR0 backplane window at the given core so subsequent
 * regs-window accesses hit that core's registers. The window register is
 * read back and rewritten once if the first write did not stick —
 * presumably a hardware quirk workaround (verify against vendor docs).
 */
static void
brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
{
	const struct pci_dev *pdev = devinfo->pdev;
	struct brcmf_core *core;
	u32 bar0_win;

	core = brcmf_chip_get_core(devinfo->ci, coreid);
	if (core) {
		bar0_win = core->base;
		pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
		if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
					  &bar0_win) == 0) {
			/* retry once if the readback does not match */
			if (bar0_win != core->base) {
				bar0_win = core->base;
				pci_write_config_dword(pdev,
						       BRCMF_PCIE_BAR0_WINDOW,
						       bar0_win);
			}
		}
	} else {
		brcmf_err("Unsupported core selected %x\n", coreid);
	}
}
/* Reset the device via the chipcommon watchdog.
 *
 * Sequence: disable ASPM (so the link stays responsive during reset),
 * hit the watchdog, wait for the reset to complete, restore ASPM, then —
 * for PCIe2 core revs <= 13 — rewrite a set of config-space registers by
 * reading each through the indirect CONFIGADDR/CONFIGDATA pair and
 * writing the value straight back (the write-back presumably re-latches
 * values lost in the reset; verify against vendor documentation).
 * No-op if the chip has not been attached yet.
 */
static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_core *core;
	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
			     BRCMF_PCIE_CFGREG_PM_CSR,
			     BRCMF_PCIE_CFGREG_MSI_CAP,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
			     BRCMF_PCIE_CFGREG_MSI_DATA,
			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
			     BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
			     BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
	u32 i;
	u32 val;
	u32 lsc;

	if (!devinfo->ci)
		return;

	/* Disable ASPM */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
			      &lsc);
	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
			       val);

	/* Watchdog reset */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
	WRITECC32(devinfo, watchdog, 4);
	msleep(100);

	/* Restore ASPM */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
			       lsc);

	core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
	if (core->rev <= 13) {
		/* read-modify-write each register with its own value */
		for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
			brcmf_pcie_write_reg32(devinfo,
					       BRCMF_PCIE_PCIE2REG_CONFIGADDR,
					       cfg_offset[i]);
			val = brcmf_pcie_read_reg32(devinfo,
						    BRCMF_PCIE_PCIE2REG_CONFIGDATA);
			brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
				  cfg_offset[i], val);
			brcmf_pcie_write_reg32(devinfo,
					       BRCMF_PCIE_PCIE2REG_CONFIGDATA,
					       val);
		}
	}
}
  486. static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
  487. {
  488. u32 config;
  489. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  490. /* BAR1 window may not be sized properly */
  491. brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
  492. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
  493. config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
  494. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
  495. device_wakeup_enable(&devinfo->pdev->dev);
  496. }
  497. static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
  498. {
  499. if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
  500. brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
  501. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
  502. 5);
  503. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
  504. 0);
  505. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
  506. 7);
  507. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
  508. 0);
  509. }
  510. return 0;
  511. }
/* Finish firmware download and start the chip. On the 43602 the internal
 * memory core is reset first. Returns 0 on success, nonzero if the chip
 * could not be activated (note the logical-not of the bool returned by
 * brcmf_chip_set_active).
 */
static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
					  u32 resetintr)
{
	struct brcmf_core *core;

	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
		core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
		brcmf_chip_resetcore(core, 0, 0, 0);
	}

	return !brcmf_chip_set_active(devinfo->ci, resetintr);
}
/* Post a host-to-device mailbox value and ring the SB mailbox.
 *
 * The mailbox word in TCM must be 0 (consumed by firmware) before a new
 * value may be written; if a previous value is still pending, poll every
 * 10 ms for up to ~1 s and give up with -EIO. The SBMBX config register
 * is written twice — presumably a deliberate hardware workaround; verify
 * before changing. Returns 0 on success.
 */
static int
brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 cur_htod_mb_data;
	u32 i;

	shared = &devinfo->shared;
	addr = shared->htod_mb_data_addr;
	cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);

	if (cur_htod_mb_data != 0)
		brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
			  cur_htod_mb_data);

	i = 0;
	while (cur_htod_mb_data != 0) {
		msleep(10);
		i++;
		if (i > 100)
			return -EIO;
		cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
	}

	brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
	return 0;
}
/* Consume a device-to-host mailbox value from TCM (clearing it so the
 * firmware can post the next one) and act on the event bits:
 * acknowledge deep-sleep entry requests, log deep-sleep exit, and wake
 * any waiter blocked on a D3 acknowledgement.
 */
static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 dtoh_mb_data;

	shared = &devinfo->shared;
	addr = shared->dtoh_mb_data_addr;
	dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);

	if (!dtoh_mb_data)
		return;

	/* clear before handling so firmware may post the next value */
	brcmf_pcie_write_tcm32(devinfo, addr, 0);

	brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
		brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
		brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
	}
	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
		if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
			devinfo->mbdata_completed = true;
			wake_up(&devinfo->mbdata_resp_wait);
		}
	}
}
  575. static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
  576. {
  577. struct brcmf_pcie_shared_info *shared;
  578. struct brcmf_pcie_console *console;
  579. u32 addr;
  580. shared = &devinfo->shared;
  581. console = &shared->console;
  582. addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
  583. console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
  584. addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
  585. console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
  586. addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
  587. console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
  588. brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
  589. console->base_addr, console->buf_addr, console->bufsize);
  590. }
/* Drain new characters from the firmware console ring buffer, assembling
 * them into log_str and emitting one pr_debug line per '\n'. '\r' is
 * dropped; a line that would overflow log_str is force-terminated with a
 * synthetic '\n' (the -2 leaves room for that newline plus the NUL).
 * Does nothing unless firmware-console debugging is enabled.
 */
static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_console *console;
	u32 addr;
	u8 ch;
	u32 newidx;

	if (!BRCMF_FWCON_ON())
		return;

	console = &devinfo->shared.console;
	/* firmware's write index tells us how far we may read */
	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
	while (newidx != console->read_idx) {
		addr = console->buf_addr + console->read_idx;
		ch = brcmf_pcie_read_tcm8(devinfo, addr);
		console->read_idx++;
		if (console->read_idx == console->bufsize)
			console->read_idx = 0;	/* wrap around */
		if (ch == '\r')
			continue;
		console->log_str[console->log_idx] = ch;
		console->log_idx++;
		/* force a line break before log_str overflows */
		if ((ch != '\n') &&
		    (console->log_idx == (sizeof(console->log_str) - 2))) {
			ch = '\n';
			console->log_str[console->log_idx] = ch;
			console->log_idx++;
		}
		if (ch == '\n') {
			console->log_str[console->log_idx] = 0;
			pr_debug("CONSOLE: %s", console->log_str);
			console->log_idx = 0;
		}
	}
}
/* Ring the host-to-device doorbell on a gen1 PCIe core: read-modify-write
 * the mailbox interrupt register with the INTB bit set. Marked __used
 * since no caller is visible in this file.
 */
static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo)
{
	u32 reg_value;

	brcmf_dbg(PCIE, "RING !\n");
	reg_value = brcmf_pcie_read_reg32(devinfo,
					  BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	reg_value |= BRCMF_PCIE2_INTB;
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
			       reg_value);
}
/* Ring the host-to-device doorbell on a gen2 PCIe core: any write to the
 * H2D mailbox register triggers the interrupt.
 */
static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo)
{
	brcmf_dbg(PCIE, "RING !\n");
	/* Any arbitrary value will do, lets use 1 */
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
}
  641. static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
  642. {
  643. if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
  644. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
  645. 0);
  646. else
  647. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
  648. 0);
  649. }
  650. static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
  651. {
  652. if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
  653. pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
  654. BRCMF_PCIE_INT_DEF);
  655. else
  656. brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
  657. BRCMF_PCIE_MB_INT_D2H_DB |
  658. BRCMF_PCIE_MB_INT_FN0_0 |
  659. BRCMF_PCIE_MB_INT_FN0_1);
  660. }
  661. static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg)
  662. {
  663. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
  664. u32 status;
  665. status = 0;
  666. pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
  667. if (status) {
  668. brcmf_pcie_intr_disable(devinfo);
  669. brcmf_dbg(PCIE, "Enter\n");
  670. return IRQ_WAKE_THREAD;
  671. }
  672. return IRQ_NONE;
  673. }
  674. static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg)
  675. {
  676. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
  677. if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
  678. brcmf_pcie_intr_disable(devinfo);
  679. brcmf_dbg(PCIE, "Enter\n");
  680. return IRQ_WAKE_THREAD;
  681. }
  682. return IRQ_NONE;
  683. }
/* Threaded IRQ handler for generation 1 PCIe cores: read and acknowledge
 * the interrupt status in PCI config space, then kick msgbuf RX handling.
 */
static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	const struct pci_dev *pdev = devinfo->pdev;
	u32 status;

	devinfo->in_irq = true;	/* polled by brcmf_pcie_release_irq() */
	status = 0;
	pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		/* write the status bits back to acknowledge them */
		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
		if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
			brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
	}
	/* quick-check ISR masked interrupts; unmask only while still up */
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
/* Threaded IRQ handler for generation 2 PCIe cores: acknowledge the
 * mailbox interrupt, dispatch mailbox events and/or msgbuf RX work, and
 * drain the firmware console.
 */
static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	u32 status;

	devinfo->in_irq = true;	/* polled by brcmf_pcie_release_irq() */
	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		/* write the status bits back to acknowledge them */
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
		if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
			      BRCMF_PCIE_MB_INT_FN0_1))
			brcmf_pcie_handle_mb_data(devinfo);
		if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
				brcmf_proto_msgbuf_rx_trigger(
							&devinfo->pdev->dev);
		}
	}
	brcmf_pcie_bus_console_read(devinfo);
	/* quick-check ISR masked interrupts; unmask only while still up */
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
  728. static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
  729. {
  730. struct pci_dev *pdev;
  731. pdev = devinfo->pdev;
  732. brcmf_pcie_intr_disable(devinfo);
  733. brcmf_dbg(PCIE, "Enter\n");
  734. /* is it a v1 or v2 implementation */
  735. devinfo->irq_requested = false;
  736. pci_enable_msi(pdev);
  737. if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
  738. if (request_threaded_irq(pdev->irq,
  739. brcmf_pcie_quick_check_isr_v1,
  740. brcmf_pcie_isr_thread_v1,
  741. IRQF_SHARED, "brcmf_pcie_intr",
  742. devinfo)) {
  743. pci_disable_msi(pdev);
  744. brcmf_err("Failed to request IRQ %d\n", pdev->irq);
  745. return -EIO;
  746. }
  747. } else {
  748. if (request_threaded_irq(pdev->irq,
  749. brcmf_pcie_quick_check_isr_v2,
  750. brcmf_pcie_isr_thread_v2,
  751. IRQF_SHARED, "brcmf_pcie_intr",
  752. devinfo)) {
  753. pci_disable_msi(pdev);
  754. brcmf_err("Failed to request IRQ %d\n", pdev->irq);
  755. return -EIO;
  756. }
  757. }
  758. devinfo->irq_requested = true;
  759. devinfo->irq_allocated = true;
  760. return 0;
  761. }
/* Free the bus interrupt, wait for any still-running threaded handler to
 * finish, and acknowledge anything left pending at the device.
 */
static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	u32 status;
	u32 count;

	if (!devinfo->irq_allocated)
		return;

	pdev = devinfo->pdev;

	brcmf_pcie_intr_disable(devinfo);
	if (!devinfo->irq_requested)
		return;
	devinfo->irq_requested = false;
	free_irq(pdev->irq, devinfo);
	pci_disable_msi(pdev);

	msleep(50);
	count = 0;
	/* poll in_irq for up to ~1s in case a threaded handler is active */
	while ((devinfo->in_irq) && (count < 20)) {
		msleep(50);
		count++;
	}
	if (devinfo->in_irq)
		brcmf_err("Still in IRQ (processing) !!!\n");

	/* read and write back the pending status to acknowledge it */
	if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
		status = 0;
		pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
		pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
	} else {
		status = brcmf_pcie_read_reg32(devinfo,
					       BRCMF_PCIE_PCIE2REG_MAILBOXINT);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
	}
	devinfo->irq_allocated = false;
}
  796. static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
  797. {
  798. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  799. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  800. struct brcmf_commonring *commonring = &ring->commonring;
  801. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  802. return -EIO;
  803. brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
  804. commonring->w_ptr, ring->id);
  805. devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
  806. return 0;
  807. }
  808. static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
  809. {
  810. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  811. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  812. struct brcmf_commonring *commonring = &ring->commonring;
  813. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  814. return -EIO;
  815. brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
  816. commonring->r_ptr, ring->id);
  817. devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
  818. return 0;
  819. }
  820. static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
  821. {
  822. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  823. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  824. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  825. return -EIO;
  826. devinfo->ringbell(devinfo);
  827. return 0;
  828. }
  829. static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
  830. {
  831. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  832. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  833. struct brcmf_commonring *commonring = &ring->commonring;
  834. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  835. return -EIO;
  836. commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
  837. brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
  838. commonring->w_ptr, ring->id);
  839. return 0;
  840. }
  841. static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
  842. {
  843. struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
  844. struct brcmf_pciedev_info *devinfo = ring->devinfo;
  845. struct brcmf_commonring *commonring = &ring->commonring;
  846. if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
  847. return -EIO;
  848. commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
  849. brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
  850. commonring->r_ptr, ring->id);
  851. return 0;
  852. }
  853. static void *
  854. brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
  855. u32 size, u32 tcm_dma_phys_addr,
  856. dma_addr_t *dma_handle)
  857. {
  858. void *ring;
  859. u64 address;
  860. ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
  861. GFP_KERNEL);
  862. if (!ring)
  863. return NULL;
  864. address = (u64)*dma_handle;
  865. brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
  866. address & 0xffffffff);
  867. brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
  868. memset(ring, 0, size);
  869. return (ring);
  870. }
/* Allocate the DMA backing store for message ring @ring_id, announce its
 * address and geometry in device TCM at @tcm_ring_phys_addr, and wire up
 * the commonring callbacks. Returns NULL on allocation failure.
 */
static struct brcmf_pcie_ringbuf *
brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
			      u32 tcm_ring_phys_addr)
{
	void *dma_buf;
	dma_addr_t dma_handle;
	struct brcmf_pcie_ringbuf *ring;
	u32 size;
	u32 addr;

	size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id];
	dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
			tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
			&dma_handle);
	if (!dma_buf)
		return NULL;

	/* tell the device the item count and the size of each item */
	addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
	addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
				  dma_handle);
		return NULL;
	}
	brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
				brcmf_ring_itemsize[ring_id], dma_buf);
	ring->dma_handle = dma_handle;
	ring->devinfo = devinfo;
	brcmf_commonring_register_cb(&ring->commonring,
				     brcmf_pcie_ring_mb_ring_bell,
				     brcmf_pcie_ring_mb_update_rptr,
				     brcmf_pcie_ring_mb_update_wptr,
				     brcmf_pcie_ring_mb_write_rptr,
				     brcmf_pcie_ring_mb_write_wptr, ring);
	return (ring);
}
  908. static void brcmf_pcie_release_ringbuffer(struct device *dev,
  909. struct brcmf_pcie_ringbuf *ring)
  910. {
  911. void *dma_buf;
  912. u32 size;
  913. if (!ring)
  914. return;
  915. dma_buf = ring->commonring.buf_addr;
  916. if (dma_buf) {
  917. size = ring->commonring.depth * ring->commonring.item_len;
  918. dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
  919. }
  920. kfree(ring);
  921. }
  922. static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
  923. {
  924. u32 i;
  925. for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
  926. brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
  927. devinfo->shared.commonrings[i]);
  928. devinfo->shared.commonrings[i] = NULL;
  929. }
  930. kfree(devinfo->shared.flowrings);
  931. devinfo->shared.flowrings = NULL;
  932. if (devinfo->idxbuf) {
  933. dma_free_coherent(&devinfo->pdev->dev,
  934. devinfo->idxbuf_sz,
  935. devinfo->idxbuf,
  936. devinfo->idxbuf_dmahandle);
  937. devinfo->idxbuf = NULL;
  938. }
  939. }
/* Read the ring layout advertised by firmware in shared RAM, choose
 * between TCM-resident and host-memory (DMA) ring indices, then allocate
 * the common message rings and the flowring bookkeeping array.
 * Returns 0 on success or -ENOMEM after releasing partial allocations.
 */
static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_ringbuf *ring;
	struct brcmf_pcie_ringbuf *rings;
	u32 ring_addr;
	u32 d2h_w_idx_ptr;
	u32 d2h_r_idx_ptr;
	u32 h2d_w_idx_ptr;
	u32 h2d_r_idx_ptr;
	u32 addr;
	u32 ring_mem_ptr;
	u32 i;
	u64 address;
	u32 bufsz;
	u16 max_sub_queues;
	u8 idx_offset;

	ring_addr = devinfo->shared.ring_info_addr;
	brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
	addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
	max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);

	/* try host-memory indices when firmware supports DMA index updates;
	 * fall back to TCM indices if the allocation fails
	 */
	if (devinfo->dma_idx_sz != 0) {
		bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
			devinfo->dma_idx_sz * 2;
		devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
						     &devinfo->idxbuf_dmahandle,
						     GFP_KERNEL);
		if (!devinfo->idxbuf)
			devinfo->dma_idx_sz = 0;
	}

	if (devinfo->dma_idx_sz == 0) {
		/* ring read/write indices live in device TCM */
		addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
		d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
		addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
		d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
		addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
		h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
		addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
		h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
		idx_offset = sizeof(u32);
		devinfo->write_ptr = brcmf_pcie_write_tcm16;
		devinfo->read_ptr = brcmf_pcie_read_tcm16;
		brcmf_dbg(PCIE, "Using TCM indices\n");
	} else {
		/* indices live in the idxbuf DMA buffer, laid out as four
		 * consecutive sections (h2d write, h2d read, d2h write,
		 * d2h read); each section's DMA address is handed to the
		 * firmware below as two 32-bit words
		 */
		memset(devinfo->idxbuf, 0, bufsz);
		devinfo->idxbuf_sz = bufsz;
		idx_offset = devinfo->dma_idx_sz;
		devinfo->write_ptr = brcmf_pcie_write_idx;
		devinfo->read_ptr = brcmf_pcie_read_idx;

		h2d_w_idx_ptr = 0;
		addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
		address = (u64)devinfo->idxbuf_dmahandle;
		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
		addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
		address += max_sub_queues * idx_offset;
		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
		addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
		address += max_sub_queues * idx_offset;
		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
				BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
		addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
		address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
		brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
		brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
		brcmf_dbg(PCIE, "Using host memory indices\n");
	}

	addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
	ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);

	/* host-to-device common rings */
	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		h2d_w_idx_ptr += idx_offset;
		h2d_r_idx_ptr += idx_offset;
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* device-to-host common rings */
	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = d2h_w_idx_ptr;
		ring->r_idx_addr = d2h_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		d2h_w_idx_ptr += idx_offset;
		d2h_r_idx_ptr += idx_offset;
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	/* only the flowring bookkeeping array is allocated here; no DMA
	 * buffers are attached to flowrings at this point
	 */
	devinfo->shared.nrof_flowrings =
			max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
	rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
			GFP_KERNEL);
	if (!rings)
		goto fail;

	brcmf_dbg(PCIE, "Nr of flowrings is %d\n",
		  devinfo->shared.nrof_flowrings);

	for (i = 0; i < devinfo->shared.nrof_flowrings; i++) {
		ring = &rings[i];
		ring->devinfo = devinfo;
		ring->id = i + BRCMF_NROF_COMMON_MSGRINGS;
		brcmf_commonring_register_cb(&ring->commonring,
					     brcmf_pcie_ring_mb_ring_bell,
					     brcmf_pcie_ring_mb_update_rptr,
					     brcmf_pcie_ring_mb_update_wptr,
					     brcmf_pcie_ring_mb_write_rptr,
					     brcmf_pcie_ring_mb_write_wptr,
					     ring);
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		h2d_w_idx_ptr += idx_offset;
		h2d_r_idx_ptr += idx_offset;
	}
	devinfo->shared.flowrings = rings;

	return 0;

fail:
	brcmf_err("Allocating ring buffers failed\n");
	brcmf_pcie_release_ringbuffers(devinfo);
	return -ENOMEM;
}
  1069. static void
  1070. brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
  1071. {
  1072. if (devinfo->shared.scratch)
  1073. dma_free_coherent(&devinfo->pdev->dev,
  1074. BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
  1075. devinfo->shared.scratch,
  1076. devinfo->shared.scratch_dmahandle);
  1077. if (devinfo->shared.ringupd)
  1078. dma_free_coherent(&devinfo->pdev->dev,
  1079. BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
  1080. devinfo->shared.ringupd,
  1081. devinfo->shared.ringupd_dmahandle);
  1082. }
/* Allocate the D2H scratch and ring-update DMA buffers and publish their
 * addresses and lengths in shared TCM for the firmware.
 * Returns 0 on success or -ENOMEM after freeing partial allocations.
 */
static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
{
	u64 address;
	u32 addr;

	devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
		BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
		&devinfo->shared.scratch_dmahandle, GFP_KERNEL);
	if (!devinfo->shared.scratch)
		goto fail;

	memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);

	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
	address = (u64)devinfo->shared.scratch_dmahandle;
	/* the 64-bit DMA address is written as two 32-bit words, low first */
	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);

	devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
		BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
		&devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
	if (!devinfo->shared.ringupd)
		goto fail;

	memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);

	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
	address = (u64)devinfo->shared.ringupd_dmahandle;
	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
	return 0;

fail:
	brcmf_err("Allocating scratch buffers failed\n");
	brcmf_pcie_release_scratchbuffers(devinfo);
	return -ENOMEM;
}
/* Bus "stop" callback: intentionally a no-op for PCIe. */
static void brcmf_pcie_down(struct device *dev)
{
}
/* Bus "txdata" callback: stub that always reports success.
 * NOTE(review): presumably the data path goes through the msgbuf protocol
 * layer instead — confirm against the msgbuf code.
 */
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
	return 0;
}
/* Bus "txctl" callback: stub that always reports success. */
static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
/* Bus "rxctl" callback: stub that always reports success. */
static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}
  1138. static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
  1139. {
  1140. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1141. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1142. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1143. brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
  1144. devinfo->wowl_enabled = enabled;
  1145. if (enabled)
  1146. device_set_wakeup_enable(&devinfo->pdev->dev, true);
  1147. else
  1148. device_set_wakeup_enable(&devinfo->pdev->dev, false);
  1149. }
  1150. static size_t brcmf_pcie_get_ramsize(struct device *dev)
  1151. {
  1152. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1153. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1154. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1155. return devinfo->ci->ramsize - devinfo->ci->srsize;
  1156. }
  1157. static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
  1158. {
  1159. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1160. struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
  1161. struct brcmf_pciedev_info *devinfo = buspub->devinfo;
  1162. brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
  1163. brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
  1164. return 0;
  1165. }
/* Bus-layer callbacks registered for the PCIe bus. */
static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
	.txdata = brcmf_pcie_tx,
	.stop = brcmf_pcie_down,
	.txctl = brcmf_pcie_tx_ctlpkt,
	.rxctl = brcmf_pcie_rx_ctlpkt,
	.wowl_config = brcmf_pcie_wowl_config,
	.get_ramsize = brcmf_pcie_get_ramsize,
	.get_memdump = brcmf_pcie_get_memdump,
};
/* Parse the shared RAM info structure published by firmware at
 * @sharedram_addr: protocol version, DMA index support, rx parameters and
 * mailbox/ring addresses. Also initializes the firmware console.
 * Returns 0 on success or -EINVAL for an unsupported protocol version.
 */
static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
			       u32 sharedram_addr)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 version;

	shared = &devinfo->shared;
	shared->tcm_base_address = sharedram_addr;

	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
	version = shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK;
	brcmf_dbg(PCIE, "PCIe protocol version %d\n", version);
	if ((version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
	    (version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
		brcmf_err("Unsupported PCIE version %d\n", version);
		return -EINVAL;
	}

	/* check whether firmware supports dma indices */
	if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
		if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
			devinfo->dma_idx_sz = sizeof(u16);
		else
			devinfo->dma_idx_sz = sizeof(u32);
	}

	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
	if (shared->max_rxbufpost == 0)
		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;

	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
		  shared->max_rxbufpost, shared->rx_dataoffset);

	brcmf_pcie_bus_console_init(devinfo);

	return 0;
}
/* Select firmware and NVRAM file names for the detected chip and build
 * the full paths in devinfo->fw_name / devinfo->nvram_name, honouring the
 * brcmf_firmware_path module parameter when set.
 * Returns 0 on success or -ENODEV for an unsupported chip.
 * NOTE(review): the strncat() calls assume fw_name/nvram_name start out as
 * zero-filled (empty) strings — verify devinfo is zero-allocated.
 */
static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
{
	char *fw_name;
	char *nvram_name;
	uint fw_len, nv_len;
	char end;

	brcmf_dbg(PCIE, "Enter, chip 0x%04x chiprev %d\n", devinfo->ci->chip,
		  devinfo->ci->chiprev);

	switch (devinfo->ci->chip) {
	case BRCM_CC_43602_CHIP_ID:
		fw_name = BRCMF_PCIE_43602_FW_NAME;
		nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
		break;
	case BRCM_CC_4350_CHIP_ID:
		fw_name = BRCMF_PCIE_4350_FW_NAME;
		nvram_name = BRCMF_PCIE_4350_NVRAM_NAME;
		break;
	case BRCM_CC_4356_CHIP_ID:
		fw_name = BRCMF_PCIE_4356_FW_NAME;
		nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
		break;
	case BRCM_CC_43567_CHIP_ID:
	case BRCM_CC_43569_CHIP_ID:
	case BRCM_CC_43570_CHIP_ID:
		fw_name = BRCMF_PCIE_43570_FW_NAME;
		nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
		break;
	case BRCM_CC_4358_CHIP_ID:
		fw_name = BRCMF_PCIE_4358_FW_NAME;
		nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
		break;
	case BRCM_CC_4365_CHIP_ID:
		fw_name = BRCMF_PCIE_4365_FW_NAME;
		nvram_name = BRCMF_PCIE_4365_NVRAM_NAME;
		break;
	case BRCM_CC_4366_CHIP_ID:
		fw_name = BRCMF_PCIE_4366_FW_NAME;
		nvram_name = BRCMF_PCIE_4366_NVRAM_NAME;
		break;
	case BRCM_CC_4371_CHIP_ID:
		fw_name = BRCMF_PCIE_4371_FW_NAME;
		nvram_name = BRCMF_PCIE_4371_NVRAM_NAME;
		break;
	default:
		brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
		return -ENODEV;
	}

	/* reserve one byte for the terminating NUL */
	fw_len = sizeof(devinfo->fw_name) - 1;
	nv_len = sizeof(devinfo->nvram_name) - 1;
	/* check if firmware path is provided by module parameter */
	if (brcmf_firmware_path[0] != '\0') {
		strncpy(devinfo->fw_name, brcmf_firmware_path, fw_len);
		strncpy(devinfo->nvram_name, brcmf_firmware_path, nv_len);
		fw_len -= strlen(devinfo->fw_name);
		nv_len -= strlen(devinfo->nvram_name);

		/* append a separator unless the path already ends in one */
		end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
		if (end != '/') {
			strncat(devinfo->fw_name, "/", fw_len);
			strncat(devinfo->nvram_name, "/", nv_len);
			fw_len--;
			nv_len--;
		}
	}
	strncat(devinfo->fw_name, fw_name, fw_len);
	strncat(devinfo->nvram_name, nvram_name, nv_len);
	return 0;
}
/* Download firmware (and optional NVRAM) into device RAM, start the ARM
 * core and wait for the firmware to publish the shared RAM structure
 * address in the last word of RAM. Consumes @fw (released) and @nvram
 * (freed). Returns 0 on success or a negative error code.
 */
static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
					const struct firmware *fw, void *nvram,
					u32 nvram_len)
{
	u32 sharedram_addr;
	u32 sharedram_addr_written;
	u32 loop_counter;
	int err;
	u32 address;
	u32 resetintr;

	devinfo->ringbell = brcmf_pcie_ringbell_v2;
	devinfo->generic_corerev = BRCMF_PCIE_GENREV2;

	brcmf_dbg(PCIE, "Halt ARM.\n");
	err = brcmf_pcie_enter_download_state(devinfo);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
				  (void *)fw->data, fw->size);

	/* the first 32-bit word of the image is the reset vector */
	resetintr = get_unaligned_le32(fw->data);
	release_firmware(fw);

	/* reset last 4 bytes of RAM address. to be used for shared
	 * area. This identifies when FW is running
	 */
	brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);

	if (nvram) {
		brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
		/* NVRAM is placed at the top end of device RAM */
		address = devinfo->ci->rambase + devinfo->ci->ramsize -
			  nvram_len;
		brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
		brcmf_fw_nvram_free(nvram);
	} else {
		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
			  devinfo->nvram_name);
	}

	sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
	brcmf_dbg(PCIE, "Bring ARM in running state\n");
	err = brcmf_pcie_exit_download_state(devinfo, resetintr);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Wait for FW init\n");
	sharedram_addr = sharedram_addr_written;
	loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
	/* firmware signals readiness by overwriting the last RAM word with
	 * the shared structure address; poll every 50 ms until timeout
	 */
	while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
		msleep(50);
		sharedram_addr = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
		loop_counter--;
	}
	if (sharedram_addr == sharedram_addr_written) {
		brcmf_err("FW failed to initialize\n");
		return -ENODEV;
	}
	brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);

	return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
}
/* Enable the PCI device and map BAR0 (register space) and BAR2 (TCM).
 * Returns 0 on success or a negative error code.
 * NOTE(review): the error returns leave the device enabled and possibly a
 * partial mapping in place — presumably cleaned up by
 * brcmf_pcie_release_resource() in the caller's failure path; verify.
 */
static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	int err;
	phys_addr_t bar0_addr, bar1_addr;
	ulong bar1_size;

	pdev = devinfo->pdev;
	err = pci_enable_device(pdev);
	if (err) {
		brcmf_err("pci_enable_device failed err=%d\n", err);
		return err;
	}

	pci_set_master(pdev);

	/* Bar-0 mapped address */
	bar0_addr = pci_resource_start(pdev, 0);
	/* Bar-1 mapped address */
	bar1_addr = pci_resource_start(pdev, 2);
	/* read Bar-1 mapped memory range */
	bar1_size = pci_resource_len(pdev, 2);
	if ((bar1_size == 0) || (bar1_addr == 0)) {
		brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
			  bar1_size, (unsigned long long)bar1_addr);
		return -EINVAL;
	}

	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
	devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE);
	devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE;

	if (!devinfo->regs || !devinfo->tcm) {
		brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
			  devinfo->tcm);
		return -EINVAL;
	}

	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
		  devinfo->regs, (unsigned long long)bar0_addr);
	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n",
		  devinfo->tcm, (unsigned long long)bar1_addr);

	return 0;
}
  1380. static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
  1381. {
  1382. if (devinfo->tcm)
  1383. iounmap(devinfo->tcm);
  1384. if (devinfo->regs)
  1385. iounmap(devinfo->regs);
  1386. pci_disable_device(devinfo->pdev);
  1387. }
  1388. static int brcmf_pcie_attach_bus(struct device *dev)
  1389. {
  1390. int ret;
  1391. /* Attach to the common driver interface */
  1392. ret = brcmf_attach(dev);
  1393. if (ret) {
  1394. brcmf_err("brcmf_attach failed\n");
  1395. } else {
  1396. ret = brcmf_bus_start(dev);
  1397. if (ret)
  1398. brcmf_err("dongle is not responding\n");
  1399. }
  1400. return ret;
  1401. }
  1402. static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
  1403. {
  1404. u32 ret_addr;
  1405. ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
  1406. addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
  1407. pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
  1408. return ret_addr;
  1409. }
  1410. static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
  1411. {
  1412. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1413. addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
  1414. return brcmf_pcie_read_reg32(devinfo, addr);
  1415. }
  1416. static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
  1417. {
  1418. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1419. addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
  1420. brcmf_pcie_write_reg32(devinfo, addr, value);
  1421. }
/* Buscore "prepare" callback: map PCI resources (ctx is the devinfo). */
static int brcmf_pcie_buscoreprep(void *ctx)
{
	return brcmf_pcie_get_resource(ctx);
}
/* Buscore "reset" callback: reset the device and clear any stale mailbox
 * interrupt state left over from before the reset. Always returns 0.
 */
static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
	u32 val;

	devinfo->ci = chip;
	brcmf_pcie_reset_device(devinfo);

	val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	/* all-ones typically means the device did not respond to the read;
	 * skip the acknowledge write in that case
	 */
	if (val != 0xffffffff)
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       val);

	return 0;
}
  1438. static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
  1439. u32 rstvec)
  1440. {
  1441. struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
  1442. brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
  1443. }
/* Buscore callbacks handed to the common chip layer; each op receives the
 * struct brcmf_pciedev_info for this device as its ctx argument.
 */
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.reset = brcmf_pcie_buscore_reset,
	.activate = brcmf_pcie_buscore_activate,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};
/* Firmware-request completion callback: download firmware/NVRAM to the
 * device, initialize message rings and scratch buffers, request the IRQ,
 * wire the rings into the bus msgbuf structure, and attach the bus to the
 * common driver layer. On any failure the driver is released from the
 * device (which triggers brcmf_pcie_remove() for cleanup).
 *
 * @dev: bus device; drvdata holds the struct brcmf_bus set up in probe.
 * @fw: downloaded firmware image.
 * @nvram: NVRAM blob (may be NULL per the NV_OPTIONAL request flag).
 * @nvram_len: length of @nvram in bytes.
 */
static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
			     void *nvram, u32 nvram_len)
{
	struct brcmf_bus *bus = dev_get_drvdata(dev);
	struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
	struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
	struct brcmf_commonring **flowrings;
	int ret;
	u32 i;

	brcmf_pcie_attach(devinfo);
	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
	if (ret)
		goto fail;

	/* firmware accepted; mark the device usable before ring setup */
	devinfo->state = BRCMFMAC_PCIE_STATE_UP;

	ret = brcmf_pcie_init_ringbuffers(devinfo);
	if (ret)
		goto fail;

	ret = brcmf_pcie_init_scratchbuffers(devinfo);
	if (ret)
		goto fail;

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	ret = brcmf_pcie_request_irq(devinfo);
	if (ret)
		goto fail;

	/* hook the commonrings in the bus structure. */
	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
		bus->msgbuf->commonrings[i] =
				&devinfo->shared.commonrings[i]->commonring;

	/* flowrings array is owned by bus->msgbuf and freed in remove() */
	flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
			    GFP_KERNEL);
	if (!flowrings)
		goto fail;

	for (i = 0; i < devinfo->shared.nrof_flowrings; i++)
		flowrings[i] = &devinfo->shared.flowrings[i].commonring;
	bus->msgbuf->flowrings = flowrings;

	bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
	bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
	bus->msgbuf->nrof_flowrings = devinfo->shared.nrof_flowrings;

	init_waitqueue_head(&devinfo->mbdata_resp_wait);

	brcmf_pcie_intr_enable(devinfo);
	if (brcmf_pcie_attach_bus(bus->dev) == 0)
		return;

	/* attach failed: dump the firmware console for diagnostics */
	brcmf_pcie_bus_console_read(devinfo);

fail:
	device_release_driver(dev);
}
/* PCI probe: allocate the per-device info, attach the chip via the buscore
 * ops, build the brcmf_bus/msgbuf structures, and kick off the asynchronous
 * firmware request (brcmf_pcie_setup() completes the bring-up).
 *
 * Returns 0 on success; on failure everything allocated here is freed and
 * a negative errno is returned.
 */
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_bus *bus;
	u16 domain_nr;
	u16 bus_nr;

	/* domain is offset by one so 0 can mean "no domain" downstream;
	 * both values are passed to the firmware-name lookup.
	 */
	domain_nr = pci_domain_nr(pdev->bus) + 1;
	bus_nr = pdev->bus->number;
	brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
		  domain_nr, bus_nr);

	ret = -ENOMEM;
	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
	if (devinfo == NULL)
		return ret;

	devinfo->pdev = pdev;
	pcie_bus_dev = NULL;
	/* brcmf_chip_attach() invokes the buscore prepare op, which claims
	 * the PCI resources (released on the fail path below).
	 */
	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
	if (IS_ERR(devinfo->ci)) {
		ret = PTR_ERR(devinfo->ci);
		devinfo->ci = NULL;
		goto fail;
	}

	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
	if (pcie_bus_dev == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto fail;
	}
	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
	if (!bus->msgbuf) {
		ret = -ENOMEM;
		kfree(bus);
		goto fail;
	}

	/* hook it all together. */
	pcie_bus_dev->devinfo = devinfo;
	pcie_bus_dev->bus = bus;
	bus->dev = &pdev->dev;
	bus->bus_priv.pcie = pcie_bus_dev;
	bus->ops = &brcmf_pcie_bus_ops;
	bus->proto_type = BRCMF_PROTO_MSGBUF;
	bus->chip = devinfo->coreid;
	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
	dev_set_drvdata(&pdev->dev, bus);

	ret = brcmf_pcie_get_fwnames(devinfo);
	if (ret)
		goto fail_bus;

	/* async: brcmf_pcie_setup() is called when the firmware arrives */
	ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
						    BRCMF_FW_REQ_NV_OPTIONAL,
					  devinfo->fw_name, devinfo->nvram_name,
					  brcmf_pcie_setup, domain_nr, bus_nr);
	if (ret == 0)
		return 0;

fail_bus:
	kfree(bus->msgbuf);
	kfree(bus);
fail:
	brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
	brcmf_pcie_release_resource(devinfo);
	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	kfree(pcie_bus_dev);
	kfree(devinfo);
	return ret;
}
/* PCI remove: detach the common layer first, then free the bus structures,
 * release IRQ/ring/scratch resources, reset the device, and finally drop
 * the chip and devinfo. Also called from the suspend/resume paths when the
 * device must be fully torn down; a NULL drvdata means remove already ran.
 */
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;
	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	/* devinfo->ci is NULL when the chip was already detached (suspend) */
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);

	kfree(devinfo);
	/* clear drvdata so a second call (e.g. from resume) is a no-op */
	dev_set_drvdata(&pdev->dev, NULL);
}
  1597. #ifdef CONFIG_PM
/* Legacy PCI suspend hook: ask the firmware to enter the D3 substate via a
 * mailbox handshake, save PCI state, and either arm wake (wowl) or tear the
 * device down completely so resume re-probes it from scratch.
 */
static int brcmf_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	int err;

	brcmf_dbg(PCIE, "Enter, state=%d, pdev=%p\n", state.event, pdev);

	bus = dev_get_drvdata(&pdev->dev);
	devinfo = bus->bus_priv.pcie->devinfo;

	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);

	/* D3 handshake: send D3_INFORM and wait for the mailbox response
	 * (mbdata_completed is set by the mailbox interrupt handler)
	 */
	devinfo->mbdata_completed = false;
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
	wait_event_timeout(devinfo->mbdata_resp_wait,
			   devinfo->mbdata_completed,
			   msecs_to_jiffies(BRCMF_PCIE_MBDATA_TIMEOUT));
	if (!devinfo->mbdata_completed) {
		brcmf_err("Timeout on response for entering D3 substate\n");
		return -EIO;
	}
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM_IN_USE);

	err = pci_save_state(pdev);
	if (err)
		brcmf_err("pci_save_state failed, err=%d\n", err);
	if ((err) || (!devinfo->wowl_enabled)) {
		/* no wake-on-wlan (or state save failed): tear everything
		 * down now; resume will do a full re-probe. Detach the chip
		 * first so remove skips the chip/interrupt paths.
		 */
		brcmf_chip_detach(devinfo->ci);
		devinfo->ci = NULL;
		brcmf_pcie_remove(pdev);
		return 0;
	}

	return pci_prepare_to_sleep(pdev);
}
/* Legacy PCI resume hook: restore D0 power state and PCI config. If the
 * device kept its state across suspend (INTMASK still nonzero), do a "hot
 * resume" by re-enabling the bus in place; otherwise tear down whatever is
 * left and run probe again from scratch.
 */
static int brcmf_pcie_resume(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	int err;

	/* bus may be NULL: suspend removes the device when wowl is off */
	bus = dev_get_drvdata(&pdev->dev);
	brcmf_dbg(PCIE, "Enter, pdev=%p, bus=%p\n", pdev, bus);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		brcmf_err("pci_set_power_state failed, err=%d\n", err);
		goto cleanup;
	}
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D3hot, false);
	pci_enable_wake(pdev, PCI_D3cold, false);

	/* Check if device is still up and running, if so we are ready */
	if (bus) {
		devinfo = bus->bus_priv.pcie->devinfo;
		if (brcmf_pcie_read_reg32(devinfo,
					  BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
			if (brcmf_pcie_send_mb_data(devinfo,
						    BRCMF_H2D_HOST_D0_INFORM))
				goto cleanup;
			brcmf_dbg(PCIE, "Hot resume, continue....\n");
			brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
			brcmf_bus_change_state(bus, BRCMF_BUS_UP);
			brcmf_pcie_intr_enable(devinfo);
			return 0;
		}
	}

cleanup:
	/* hot resume failed: full teardown, then re-probe the device */
	if (bus) {
		devinfo = bus->bus_priv.pcie->devinfo;
		brcmf_chip_detach(devinfo->ci);
		devinfo->ci = NULL;
		brcmf_pcie_remove(pdev);
	}
	err = brcmf_pcie_probe(pdev, NULL);
	if (err)
		brcmf_err("probe after resume failed, err=%d\n", err);

	return err;
}
  1670. #endif /* CONFIG_PM */
/* ID-table entry helper: Broadcom vendor, given device id, any subsystem,
 * class-masked to PCI_CLASS_NETWORK_OTHER.
 */
#define BRCMF_PCIE_DEVICE(dev_id)	{ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
  1673. static struct pci_device_id brcmf_pcie_devid_table[] = {
  1674. BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
  1675. BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
  1676. BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
  1677. BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
  1678. BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
  1679. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
  1680. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
  1681. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
  1682. BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
  1683. BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
  1684. BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
  1685. BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
  1686. BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
  1687. BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
  1688. BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
  1689. BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
  1690. { /* end: all zeroes */ }
  1691. };
  1692. MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
  1693. static struct pci_driver brcmf_pciedrvr = {
  1694. .node = {},
  1695. .name = KBUILD_MODNAME,
  1696. .id_table = brcmf_pcie_devid_table,
  1697. .probe = brcmf_pcie_probe,
  1698. .remove = brcmf_pcie_remove,
  1699. #ifdef CONFIG_PM
  1700. .suspend = brcmf_pcie_suspend,
  1701. .resume = brcmf_pcie_resume
  1702. #endif /* CONFIG_PM */
  1703. };
  1704. void brcmf_pcie_register(void)
  1705. {
  1706. int err;
  1707. brcmf_dbg(PCIE, "Enter\n");
  1708. err = pci_register_driver(&brcmf_pciedrvr);
  1709. if (err)
  1710. brcmf_err("PCIE driver registration failed, err=%d\n", err);
  1711. }
/* Unregister the PCIe driver (triggers remove for all bound devices). */
void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}