phy-xgene.c 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735
  1. /*
  2. * AppliedMicro X-Gene Multi-purpose PHY driver
  3. *
  4. * Copyright (c) 2014, Applied Micro Circuits Corporation
  5. * Author: Loc Ho <lho@apm.com>
  6. * Tuan Phan <tphan@apm.com>
  7. * Suman Tripathi <stripathi@apm.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms of the GNU General Public License as published by the
  11. * Free Software Foundation; either version 2 of the License, or (at your
  12. * option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  21. *
  22. * The APM X-Gene PHY consists of two PLL clock macros (CMU) and lanes.
  23. * The first PLL clock macro is used for internal reference clock. The second
  24. * PLL clock macro is used to generate the clock for the PHY. This driver
  25. * configures the first PLL CMU, the second PLL CMU, and programs the PHY to
  26. * operate according to the mode of operation. The first PLL CMU is only
  27. * required if internal clock is enabled.
  28. *
  29. * Logical Layer Out Of HW module units:
  30. *
  31. *              -----------------
  32. *              |   Internal    |      |------|
  33. *              |  Ref PLL CMU  |------|      |      -------------     ---------
  34. *              -----------------      | MUX  |------| PHY PLL CMU |---| Serdes |
  35. *                                     |      |      -------------     ---------
  36. *              External Clock --------|      |
  37. *                                     |------|
  38. *
  39. * The Ref PLL CMU CSR (Configuration System Registers) is accessed
  40. * indirectly from the SDS offset at 0x2000. It is only required for
  41. * internal reference clock.
  42. * The PHY PLL CMU CSR is accessed indirectly from the SDS offset at 0x0000.
  43. * The Serdes CSR is accessed indirectly from the SDS offset at 0x0400.
  44. *
  45. * The Ref PLL CMU can be located within the same PHY IP or outside the PHY IP
  46. * due to shared Ref PLL CMU. For PHY with Ref PLL CMU shared with another IP,
  47. * it is located outside the PHY IP. This is the case for the PHY located
  48. * at 0x1f23a000 (SATA Port 4/5). For such PHY, another resource is required
  49. * to locate the SDS/Ref PLL CMU module, and its clock must be enabled for
  49. * that IP.
  50. *
  51. * Currently, this driver only supports Gen3 SATA mode with external clock.
  52. */
  53. #include <linux/module.h>
  54. #include <linux/platform_device.h>
  55. #include <linux/io.h>
  56. #include <linux/delay.h>
  57. #include <linux/phy/phy.h>
  58. #include <linux/clk.h>
  59. /* Max 2 lanes per a PHY unit */
  60. #define MAX_LANE 2
  61. /* Register offset inside the PHY */
  62. #define SERDES_PLL_INDIRECT_OFFSET 0x0000
  63. #define SERDES_PLL_REF_INDIRECT_OFFSET 0x2000
  64. #define SERDES_INDIRECT_OFFSET 0x0400
  65. #define SERDES_LANE_STRIDE 0x0200
  66. /* Some default Serdes parameters */
  67. #define DEFAULT_SATA_TXBOOST_GAIN { 0x1e, 0x1e, 0x1e }
  68. #define DEFAULT_SATA_TXEYEDIRECTION { 0x0, 0x0, 0x0 }
  69. #define DEFAULT_SATA_TXEYETUNING { 0xa, 0xa, 0xa }
  70. #define DEFAULT_SATA_SPD_SEL { 0x1, 0x3, 0x7 }
  71. #define DEFAULT_SATA_TXAMP { 0x8, 0x8, 0x8 }
  72. #define DEFAULT_SATA_TXCN1 { 0x2, 0x2, 0x2 }
  73. #define DEFAULT_SATA_TXCN2 { 0x0, 0x0, 0x0 }
  74. #define DEFAULT_SATA_TXCP1 { 0xa, 0xa, 0xa }
  75. #define SATA_SPD_SEL_GEN3 0x7
  76. #define SATA_SPD_SEL_GEN2 0x3
  77. #define SATA_SPD_SEL_GEN1 0x1
  78. #define SSC_DISABLE 0
  79. #define SSC_ENABLE 1
  80. #define FBDIV_VAL_50M 0x77
  81. #define REFDIV_VAL_50M 0x1
  82. #define FBDIV_VAL_100M 0x3B
  83. #define REFDIV_VAL_100M 0x0
  84. /* SATA Clock/Reset CSR */
  85. #define SATACLKENREG 0x00000000
  86. #define SATA0_CORE_CLKEN 0x00000002
  87. #define SATA1_CORE_CLKEN 0x00000004
  88. #define SATASRESETREG 0x00000004
  89. #define SATA_MEM_RESET_MASK 0x00000020
  90. #define SATA_MEM_RESET_RD(src) (((src) & 0x00000020) >> 5)
  91. #define SATA_SDS_RESET_MASK 0x00000004
  92. #define SATA_CSR_RESET_MASK 0x00000001
  93. #define SATA_CORE_RESET_MASK 0x00000002
  94. #define SATA_PMCLK_RESET_MASK 0x00000010
  95. #define SATA_PCLK_RESET_MASK 0x00000008
  96. /* SDS CSR used for PHY Indirect access */
  97. #define SATA_ENET_SDS_PCS_CTL0 0x00000000
  98. #define REGSPEC_CFG_I_TX_WORDMODE0_SET(dst, src) \
  99. (((dst) & ~0x00070000) | (((u32) (src) << 16) & 0x00070000))
  100. #define REGSPEC_CFG_I_RX_WORDMODE0_SET(dst, src) \
  101. (((dst) & ~0x00e00000) | (((u32) (src) << 21) & 0x00e00000))
  102. #define SATA_ENET_SDS_CTL0 0x0000000c
  103. #define REGSPEC_CFG_I_CUSTOMER_PIN_MODE0_SET(dst, src) \
  104. (((dst) & ~0x00007fff) | (((u32) (src)) & 0x00007fff))
  105. #define SATA_ENET_SDS_CTL1 0x00000010
  106. #define CFG_I_SPD_SEL_CDR_OVR1_SET(dst, src) \
  107. (((dst) & ~0x0000000f) | (((u32) (src)) & 0x0000000f))
  108. #define SATA_ENET_SDS_RST_CTL 0x00000024
  109. #define SATA_ENET_SDS_IND_CMD_REG 0x0000003c
  110. #define CFG_IND_WR_CMD_MASK 0x00000001
  111. #define CFG_IND_RD_CMD_MASK 0x00000002
  112. #define CFG_IND_CMD_DONE_MASK 0x00000004
  113. #define CFG_IND_ADDR_SET(dst, src) \
  114. (((dst) & ~0x003ffff0) | (((u32) (src) << 4) & 0x003ffff0))
  115. #define SATA_ENET_SDS_IND_RDATA_REG 0x00000040
  116. #define SATA_ENET_SDS_IND_WDATA_REG 0x00000044
  117. #define SATA_ENET_CLK_MACRO_REG 0x0000004c
  118. #define I_RESET_B_SET(dst, src) \
  119. (((dst) & ~0x00000001) | (((u32) (src)) & 0x00000001))
  120. #define I_PLL_FBDIV_SET(dst, src) \
  121. (((dst) & ~0x001ff000) | (((u32) (src) << 12) & 0x001ff000))
  122. #define I_CUSTOMEROV_SET(dst, src) \
  123. (((dst) & ~0x00000f80) | (((u32) (src) << 7) & 0x00000f80))
  124. #define O_PLL_LOCK_RD(src) (((src) & 0x40000000) >> 30)
  125. #define O_PLL_READY_RD(src) (((src) & 0x80000000) >> 31)
  126. /* PLL Clock Macro Unit (CMU) CSR accessing from SDS indirectly */
  127. #define CMU_REG0 0x00000
  128. #define CMU_REG0_PLL_REF_SEL_MASK 0x00002000
  129. #define CMU_REG0_PLL_REF_SEL_SET(dst, src) \
  130. (((dst) & ~0x00002000) | (((u32) (src) << 13) & 0x00002000))
  131. #define CMU_REG0_PDOWN_MASK 0x00004000
  132. #define CMU_REG0_CAL_COUNT_RESOL_SET(dst, src) \
  133. (((dst) & ~0x000000e0) | (((u32) (src) << 5) & 0x000000e0))
  134. #define CMU_REG1 0x00002
  135. #define CMU_REG1_PLL_CP_SET(dst, src) \
  136. (((dst) & ~0x00003c00) | (((u32) (src) << 10) & 0x00003c00))
  137. #define CMU_REG1_PLL_MANUALCAL_SET(dst, src) \
  138. (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
  139. #define CMU_REG1_PLL_CP_SEL_SET(dst, src) \
  140. (((dst) & ~0x000003e0) | (((u32) (src) << 5) & 0x000003e0))
  141. #define CMU_REG1_REFCLK_CMOS_SEL_MASK 0x00000001
  142. #define CMU_REG1_REFCLK_CMOS_SEL_SET(dst, src) \
  143. (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
  144. #define CMU_REG2 0x00004
  145. #define CMU_REG2_PLL_REFDIV_SET(dst, src) \
  146. (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
  147. #define CMU_REG2_PLL_LFRES_SET(dst, src) \
  148. (((dst) & ~0x0000001e) | (((u32) (src) << 1) & 0x0000001e))
  149. #define CMU_REG2_PLL_FBDIV_SET(dst, src) \
  150. (((dst) & ~0x00003fe0) | (((u32) (src) << 5) & 0x00003fe0))
  151. #define CMU_REG3 0x00006
  152. #define CMU_REG3_VCOVARSEL_SET(dst, src) \
  153. (((dst) & ~0x0000000f) | (((u32) (src) << 0) & 0x0000000f))
  154. #define CMU_REG3_VCO_MOMSEL_INIT_SET(dst, src) \
  155. (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
  156. #define CMU_REG3_VCO_MANMOMSEL_SET(dst, src) \
  157. (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
  158. #define CMU_REG4 0x00008
  159. #define CMU_REG5 0x0000a
  160. #define CMU_REG5_PLL_LFSMCAP_SET(dst, src) \
  161. (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
  162. #define CMU_REG5_PLL_LOCK_RESOLUTION_SET(dst, src) \
  163. (((dst) & ~0x0000000e) | (((u32) (src) << 1) & 0x0000000e))
  164. #define CMU_REG5_PLL_LFCAP_SET(dst, src) \
  165. (((dst) & ~0x00003000) | (((u32) (src) << 12) & 0x00003000))
  166. #define CMU_REG5_PLL_RESETB_MASK 0x00000001
  167. #define CMU_REG6 0x0000c
  168. #define CMU_REG6_PLL_VREGTRIM_SET(dst, src) \
  169. (((dst) & ~0x00000600) | (((u32) (src) << 9) & 0x00000600))
  170. #define CMU_REG6_MAN_PVT_CAL_SET(dst, src) \
  171. (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
  172. #define CMU_REG7 0x0000e
  173. #define CMU_REG7_PLL_CALIB_DONE_RD(src) ((0x00004000 & (u32) (src)) >> 14)
  174. #define CMU_REG7_VCO_CAL_FAIL_RD(src) ((0x00000c00 & (u32) (src)) >> 10)
  175. #define CMU_REG8 0x00010
  176. #define CMU_REG9 0x00012
  177. #define CMU_REG9_WORD_LEN_8BIT 0x000
  178. #define CMU_REG9_WORD_LEN_10BIT 0x001
  179. #define CMU_REG9_WORD_LEN_16BIT 0x002
  180. #define CMU_REG9_WORD_LEN_20BIT 0x003
  181. #define CMU_REG9_WORD_LEN_32BIT 0x004
  182. #define CMU_REG9_WORD_LEN_40BIT 0x005
  183. #define CMU_REG9_WORD_LEN_64BIT 0x006
  184. #define CMU_REG9_WORD_LEN_66BIT 0x007
  185. #define CMU_REG9_TX_WORD_MODE_CH1_SET(dst, src) \
  186. (((dst) & ~0x00000380) | (((u32) (src) << 7) & 0x00000380))
  187. #define CMU_REG9_TX_WORD_MODE_CH0_SET(dst, src) \
  188. (((dst) & ~0x00000070) | (((u32) (src) << 4) & 0x00000070))
  189. #define CMU_REG9_PLL_POST_DIVBY2_SET(dst, src) \
  190. (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
  191. #define CMU_REG9_VBG_BYPASSB_SET(dst, src) \
  192. (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
  193. #define CMU_REG9_IGEN_BYPASS_SET(dst, src) \
  194. (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
  195. #define CMU_REG10 0x00014
  196. #define CMU_REG10_VREG_REFSEL_SET(dst, src) \
  197. (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
  198. #define CMU_REG11 0x00016
  199. #define CMU_REG12 0x00018
  200. #define CMU_REG12_STATE_DELAY9_SET(dst, src) \
  201. (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0))
  202. #define CMU_REG13 0x0001a
  203. #define CMU_REG14 0x0001c
  204. #define CMU_REG15 0x0001e
  205. #define CMU_REG16 0x00020
  206. #define CMU_REG16_PVT_DN_MAN_ENA_MASK 0x00000001
  207. #define CMU_REG16_PVT_UP_MAN_ENA_MASK 0x00000002
  208. #define CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(dst, src) \
  209. (((dst) & ~0x0000001c) | (((u32) (src) << 2) & 0x0000001c))
  210. #define CMU_REG16_CALIBRATION_DONE_OVERRIDE_SET(dst, src) \
  211. (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
  212. #define CMU_REG16_BYPASS_PLL_LOCK_SET(dst, src) \
  213. (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020))
  214. #define CMU_REG17 0x00022
  215. #define CMU_REG17_PVT_CODE_R2A_SET(dst, src) \
  216. (((dst) & ~0x00007f00) | (((u32) (src) << 8) & 0x00007f00))
  217. #define CMU_REG17_RESERVED_7_SET(dst, src) \
  218. (((dst) & ~0x000000e0) | (((u32) (src) << 5) & 0x000000e0))
  219. #define CMU_REG17_PVT_TERM_MAN_ENA_MASK 0x00008000
  220. #define CMU_REG18 0x00024
  221. #define CMU_REG19 0x00026
  222. #define CMU_REG20 0x00028
  223. #define CMU_REG21 0x0002a
  224. #define CMU_REG22 0x0002c
  225. #define CMU_REG23 0x0002e
  226. #define CMU_REG24 0x00030
  227. #define CMU_REG25 0x00032
  228. #define CMU_REG26 0x00034
  229. #define CMU_REG26_FORCE_PLL_LOCK_SET(dst, src) \
  230. (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
  231. #define CMU_REG27 0x00036
  232. #define CMU_REG28 0x00038
  233. #define CMU_REG29 0x0003a
  234. #define CMU_REG30 0x0003c
  235. #define CMU_REG30_LOCK_COUNT_SET(dst, src) \
  236. (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006))
  237. #define CMU_REG30_PCIE_MODE_SET(dst, src) \
  238. (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
  239. #define CMU_REG31 0x0003e
  240. #define CMU_REG32 0x00040
  241. #define CMU_REG32_FORCE_VCOCAL_START_MASK 0x00004000
  242. #define CMU_REG32_PVT_CAL_WAIT_SEL_SET(dst, src) \
  243. (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006))
  244. #define CMU_REG32_IREF_ADJ_SET(dst, src) \
  245. (((dst) & ~0x00000180) | (((u32) (src) << 7) & 0x00000180))
  246. #define CMU_REG33 0x00042
  247. #define CMU_REG34 0x00044
  248. #define CMU_REG34_VCO_CAL_VTH_LO_MAX_SET(dst, src) \
  249. (((dst) & ~0x0000000f) | (((u32) (src) << 0) & 0x0000000f))
  250. #define CMU_REG34_VCO_CAL_VTH_HI_MAX_SET(dst, src) \
  251. (((dst) & ~0x00000f00) | (((u32) (src) << 8) & 0x00000f00))
  252. #define CMU_REG34_VCO_CAL_VTH_LO_MIN_SET(dst, src) \
  253. (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0))
  254. #define CMU_REG34_VCO_CAL_VTH_HI_MIN_SET(dst, src) \
  255. (((dst) & ~0x0000f000) | (((u32) (src) << 12) & 0x0000f000))
  256. #define CMU_REG35 0x00046
  257. #define CMU_REG35_PLL_SSC_MOD_SET(dst, src) \
  258. (((dst) & ~0x0000fe00) | (((u32) (src) << 9) & 0x0000fe00))
  259. #define CMU_REG36 0x00048
  260. #define CMU_REG36_PLL_SSC_EN_SET(dst, src) \
  261. (((dst) & ~0x00000010) | (((u32) (src) << 4) & 0x00000010))
  262. #define CMU_REG36_PLL_SSC_VSTEP_SET(dst, src) \
  263. (((dst) & ~0x0000ffc0) | (((u32) (src) << 6) & 0x0000ffc0))
  264. #define CMU_REG36_PLL_SSC_DSMSEL_SET(dst, src) \
  265. (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020))
  266. #define CMU_REG37 0x0004a
  267. #define CMU_REG38 0x0004c
  268. #define CMU_REG39 0x0004e
  269. /* PHY lane CSR accessing from SDS indirectly */
  270. #define RXTX_REG0 0x000
  271. #define RXTX_REG0_CTLE_EQ_HR_SET(dst, src) \
  272. (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
  273. #define RXTX_REG0_CTLE_EQ_QR_SET(dst, src) \
  274. (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
  275. #define RXTX_REG0_CTLE_EQ_FR_SET(dst, src) \
  276. (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
  277. #define RXTX_REG1 0x002
  278. #define RXTX_REG1_RXACVCM_SET(dst, src) \
  279. (((dst) & ~0x0000f000) | (((u32) (src) << 12) & 0x0000f000))
  280. #define RXTX_REG1_CTLE_EQ_SET(dst, src) \
  281. (((dst) & ~0x00000f80) | (((u32) (src) << 7) & 0x00000f80))
  282. #define RXTX_REG1_RXVREG1_SET(dst, src) \
  283. (((dst) & ~0x00000060) | (((u32) (src) << 5) & 0x00000060))
  284. #define RXTX_REG1_RXIREF_ADJ_SET(dst, src) \
  285. (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006))
  286. #define RXTX_REG2 0x004
  287. #define RXTX_REG2_VTT_ENA_SET(dst, src) \
  288. (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
  289. #define RXTX_REG2_TX_FIFO_ENA_SET(dst, src) \
  290. (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020))
  291. #define RXTX_REG2_VTT_SEL_SET(dst, src) \
  292. (((dst) & ~0x000000c0) | (((u32) (src) << 6) & 0x000000c0))
  293. #define RXTX_REG4 0x008
  294. #define RXTX_REG4_TX_LOOPBACK_BUF_EN_MASK 0x00000040
  295. #define RXTX_REG4_TX_DATA_RATE_SET(dst, src) \
  296. (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
  297. #define RXTX_REG4_TX_WORD_MODE_SET(dst, src) \
  298. (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
  299. #define RXTX_REG5 0x00a
  300. #define RXTX_REG5_TX_CN1_SET(dst, src) \
  301. (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
  302. #define RXTX_REG5_TX_CP1_SET(dst, src) \
  303. (((dst) & ~0x000007e0) | (((u32) (src) << 5) & 0x000007e0))
  304. #define RXTX_REG5_TX_CN2_SET(dst, src) \
  305. (((dst) & ~0x0000001f) | (((u32) (src) << 0) & 0x0000001f))
  306. #define RXTX_REG6 0x00c
  307. #define RXTX_REG6_TXAMP_CNTL_SET(dst, src) \
  308. (((dst) & ~0x00000780) | (((u32) (src) << 7) & 0x00000780))
  309. #define RXTX_REG6_TXAMP_ENA_SET(dst, src) \
  310. (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
  311. #define RXTX_REG6_RX_BIST_ERRCNT_RD_SET(dst, src) \
  312. (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
  313. #define RXTX_REG6_TX_IDLE_SET(dst, src) \
  314. (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
  315. #define RXTX_REG6_RX_BIST_RESYNC_SET(dst, src) \
  316. (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
  317. #define RXTX_REG7 0x00e
  318. #define RXTX_REG7_RESETB_RXD_MASK 0x00000100
  319. #define RXTX_REG7_RESETB_RXA_MASK 0x00000080
  320. #define RXTX_REG7_BIST_ENA_RX_SET(dst, src) \
  321. (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
  322. #define RXTX_REG7_RX_WORD_MODE_SET(dst, src) \
  323. (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
  324. #define RXTX_REG8 0x010
  325. #define RXTX_REG8_CDR_LOOP_ENA_SET(dst, src) \
  326. (((dst) & ~0x00004000) | (((u32) (src) << 14) & 0x00004000))
  327. #define RXTX_REG8_CDR_BYPASS_RXLOS_SET(dst, src) \
  328. (((dst) & ~0x00000800) | (((u32) (src) << 11) & 0x00000800))
  329. #define RXTX_REG8_SSC_ENABLE_SET(dst, src) \
  330. (((dst) & ~0x00000200) | (((u32) (src) << 9) & 0x00000200))
  331. #define RXTX_REG8_SD_VREF_SET(dst, src) \
  332. (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0))
  333. #define RXTX_REG8_SD_DISABLE_SET(dst, src) \
  334. (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
  335. #define RXTX_REG7 0x00e
  336. #define RXTX_REG7_RESETB_RXD_SET(dst, src) \
  337. (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
  338. #define RXTX_REG7_RESETB_RXA_SET(dst, src) \
  339. (((dst) & ~0x00000080) | (((u32) (src) << 7) & 0x00000080))
  340. #define RXTX_REG7_LOOP_BACK_ENA_CTLE_MASK 0x00004000
  341. #define RXTX_REG7_LOOP_BACK_ENA_CTLE_SET(dst, src) \
  342. (((dst) & ~0x00004000) | (((u32) (src) << 14) & 0x00004000))
  343. #define RXTX_REG11 0x016
  344. #define RXTX_REG11_PHASE_ADJUST_LIMIT_SET(dst, src) \
  345. (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
  346. #define RXTX_REG12 0x018
  347. #define RXTX_REG12_LATCH_OFF_ENA_SET(dst, src) \
  348. (((dst) & ~0x00002000) | (((u32) (src) << 13) & 0x00002000))
  349. #define RXTX_REG12_SUMOS_ENABLE_SET(dst, src) \
  350. (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
  351. #define RXTX_REG12_RX_DET_TERM_ENABLE_MASK 0x00000002
  352. #define RXTX_REG12_RX_DET_TERM_ENABLE_SET(dst, src) \
  353. (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
  354. #define RXTX_REG13 0x01a
  355. #define RXTX_REG14 0x01c
  356. #define RXTX_REG14_CLTE_LATCAL_MAN_PROG_SET(dst, src) \
  357. (((dst) & ~0x0000003f) | (((u32) (src) << 0) & 0x0000003f))
  358. #define RXTX_REG14_CTLE_LATCAL_MAN_ENA_SET(dst, src) \
  359. (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040))
  360. #define RXTX_REG26 0x034
  361. #define RXTX_REG26_PERIOD_ERROR_LATCH_SET(dst, src) \
  362. (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
  363. #define RXTX_REG26_BLWC_ENA_SET(dst, src) \
  364. (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
  365. #define RXTX_REG21 0x02a
  366. #define RXTX_REG21_DO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
  367. #define RXTX_REG21_XO_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
  368. #define RXTX_REG21_LATCH_CAL_FAIL_ODD_RD(src) ((0x0000000f & (u32)(src)))
  369. #define RXTX_REG22 0x02c
  370. #define RXTX_REG22_SO_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
  371. #define RXTX_REG22_EO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
  372. #define RXTX_REG22_LATCH_CAL_FAIL_EVEN_RD(src) ((0x0000000f & (u32)(src)))
  373. #define RXTX_REG23 0x02e
  374. #define RXTX_REG23_DE_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
  375. #define RXTX_REG23_XE_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
  376. #define RXTX_REG24 0x030
  377. #define RXTX_REG24_EE_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10)
  378. #define RXTX_REG24_SE_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4)
  379. #define RXTX_REG27 0x036
  380. #define RXTX_REG28 0x038
  381. #define RXTX_REG31 0x03e
  382. #define RXTX_REG38 0x04c
  383. #define RXTX_REG38_CUSTOMER_PINMODE_INV_SET(dst, src) \
  384. (((dst) & 0x0000fffe) | (((u32) (src) << 1) & 0x0000fffe))
  385. #define RXTX_REG39 0x04e
  386. #define RXTX_REG40 0x050
  387. #define RXTX_REG41 0x052
  388. #define RXTX_REG42 0x054
  389. #define RXTX_REG43 0x056
  390. #define RXTX_REG44 0x058
  391. #define RXTX_REG45 0x05a
  392. #define RXTX_REG46 0x05c
  393. #define RXTX_REG47 0x05e
  394. #define RXTX_REG48 0x060
  395. #define RXTX_REG49 0x062
  396. #define RXTX_REG50 0x064
  397. #define RXTX_REG51 0x066
  398. #define RXTX_REG52 0x068
  399. #define RXTX_REG53 0x06a
  400. #define RXTX_REG54 0x06c
  401. #define RXTX_REG55 0x06e
  402. #define RXTX_REG61 0x07a
  403. #define RXTX_REG61_ISCAN_INBERT_SET(dst, src) \
  404. (((dst) & ~0x00000010) | (((u32) (src) << 4) & 0x00000010))
  405. #define RXTX_REG61_LOADFREQ_SHIFT_SET(dst, src) \
  406. (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
  407. #define RXTX_REG61_EYE_COUNT_WIDTH_SEL_SET(dst, src) \
  408. (((dst) & ~0x000000c0) | (((u32) (src) << 6) & 0x000000c0))
  409. #define RXTX_REG61_SPD_SEL_CDR_SET(dst, src) \
  410. (((dst) & ~0x00003c00) | (((u32) (src) << 10) & 0x00003c00))
  411. #define RXTX_REG62 0x07c
  412. #define RXTX_REG62_PERIOD_H1_QLATCH_SET(dst, src) \
  413. (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800))
  414. #define RXTX_REG81 0x0a2
  415. #define RXTX_REG89_MU_TH7_SET(dst, src) \
  416. (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
  417. #define RXTX_REG89_MU_TH8_SET(dst, src) \
  418. (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
  419. #define RXTX_REG89_MU_TH9_SET(dst, src) \
  420. (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
  421. #define RXTX_REG96 0x0c0
  422. #define RXTX_REG96_MU_FREQ1_SET(dst, src) \
  423. (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
  424. #define RXTX_REG96_MU_FREQ2_SET(dst, src) \
  425. (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
  426. #define RXTX_REG96_MU_FREQ3_SET(dst, src) \
  427. (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
  428. #define RXTX_REG99 0x0c6
  429. #define RXTX_REG99_MU_PHASE1_SET(dst, src) \
  430. (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800))
  431. #define RXTX_REG99_MU_PHASE2_SET(dst, src) \
  432. (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0))
  433. #define RXTX_REG99_MU_PHASE3_SET(dst, src) \
  434. (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e))
  435. #define RXTX_REG102 0x0cc
  436. #define RXTX_REG102_FREQLOOP_LIMIT_SET(dst, src) \
  437. (((dst) & ~0x00000060) | (((u32) (src) << 5) & 0x00000060))
  438. #define RXTX_REG114 0x0e4
  439. #define RXTX_REG121 0x0f2
  440. #define RXTX_REG121_SUMOS_CAL_CODE_RD(src) ((0x0000003e & (u32)(src)) >> 0x1)
  441. #define RXTX_REG125 0x0fa
  442. #define RXTX_REG125_PQ_REG_SET(dst, src) \
  443. (((dst) & ~0x0000fe00) | (((u32) (src) << 9) & 0x0000fe00))
  444. #define RXTX_REG125_SIGN_PQ_SET(dst, src) \
  445. (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100))
  446. #define RXTX_REG125_SIGN_PQ_2C_SET(dst, src) \
  447. (((dst) & ~0x00000080) | (((u32) (src) << 7) & 0x00000080))
  448. #define RXTX_REG125_PHZ_MANUALCODE_SET(dst, src) \
  449. (((dst) & ~0x0000007c) | (((u32) (src) << 2) & 0x0000007c))
  450. #define RXTX_REG125_PHZ_MANUAL_SET(dst, src) \
  451. (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
  452. #define RXTX_REG127 0x0fe
  453. #define RXTX_REG127_FORCE_SUM_CAL_START_MASK 0x00000002
  454. #define RXTX_REG127_FORCE_LAT_CAL_START_MASK 0x00000004
  455. #define RXTX_REG127_FORCE_SUM_CAL_START_SET(dst, src) \
  456. (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
  457. #define RXTX_REG127_FORCE_LAT_CAL_START_SET(dst, src) \
  458. (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
  459. #define RXTX_REG127_LATCH_MAN_CAL_ENA_SET(dst, src) \
  460. (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008))
  461. #define RXTX_REG127_DO_LATCH_MANCAL_SET(dst, src) \
  462. (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
  463. #define RXTX_REG127_XO_LATCH_MANCAL_SET(dst, src) \
  464. (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
  465. #define RXTX_REG128 0x100
  466. #define RXTX_REG128_LATCH_CAL_WAIT_SEL_SET(dst, src) \
  467. (((dst) & ~0x0000000c) | (((u32) (src) << 2) & 0x0000000c))
  468. #define RXTX_REG128_EO_LATCH_MANCAL_SET(dst, src) \
  469. (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
  470. #define RXTX_REG128_SO_LATCH_MANCAL_SET(dst, src) \
  471. (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
  472. #define RXTX_REG129 0x102
  473. #define RXTX_REG129_DE_LATCH_MANCAL_SET(dst, src) \
  474. (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
  475. #define RXTX_REG129_XE_LATCH_MANCAL_SET(dst, src) \
  476. (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
  477. #define RXTX_REG130 0x104
  478. #define RXTX_REG130_EE_LATCH_MANCAL_SET(dst, src) \
  479. (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00))
  480. #define RXTX_REG130_SE_LATCH_MANCAL_SET(dst, src) \
  481. (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0))
  482. #define RXTX_REG145 0x122
  483. #define RXTX_REG145_TX_IDLE_SATA_SET(dst, src) \
  484. (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001))
  485. #define RXTX_REG145_RXES_ENA_SET(dst, src) \
  486. (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002))
  487. #define RXTX_REG145_RXDFE_CONFIG_SET(dst, src) \
  488. (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000))
  489. #define RXTX_REG145_RXVWES_LATENA_SET(dst, src) \
  490. (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004))
  491. #define RXTX_REG147 0x126
  492. #define RXTX_REG148 0x128
/* Clock macro type */
enum cmu_type_t {
	REF_CMU = 0,	/* Clock macro is the internal reference clock */
	PHY_CMU = 1,	/* Clock macro is the PLL for the Serdes */
};

enum mux_type_t {
	MUX_SELECT_ATA = 0,	/* Switch the MUX to ATA */
	MUX_SELECT_SGMMII = 0,	/* Switch the MUX to SGMII */
	/*
	 * NOTE(review): both selectors carry the value 0 and "SGMMII" looks
	 * like a typo for "SGMII" — neither enumerator is used in this file,
	 * confirm against the mux programming model before relying on them.
	 */
};

enum clk_type_t {
	CLK_EXT_DIFF = 0,	/* External differential */
	CLK_INT_DIFF = 1,	/* Internal differential */
	CLK_INT_SING = 2,	/* Internal single ended */
};

enum phy_mode {
	MODE_SATA = 0,	/* List them for simple reference */
	MODE_SGMII = 1,
	MODE_PCIE = 2,
	MODE_USB = 3,
	MODE_XFI = 4,
	MODE_MAX
};

/*
 * Per-lane/per-speed Tx tuning overrides. Arrays sized MAX_LANE*3 are
 * indexed as [lane * 3 + speed] by the lane configuration code.
 */
struct xgene_sata_override_param {
	u32 speed[MAX_LANE]; /* Index for override parameter per lane */
	u32 txspeed[3]; /* Tx speed */
	u32 txboostgain[MAX_LANE*3]; /* Tx freq boost and gain control */
	u32 txeyetuning[MAX_LANE*3]; /* Tx eye tuning */
	u32 txeyedirection[MAX_LANE*3]; /* Tx eye tuning direction */
	u32 txamplitude[MAX_LANE*3]; /* Tx amplitude control */
	u32 txprecursor_cn1[MAX_LANE*3]; /* Tx emphasis taps 1st pre-cursor */
	u32 txprecursor_cn2[MAX_LANE*3]; /* Tx emphasis taps 2nd pre-cursor */
	u32 txpostcursor_cp1[MAX_LANE*3]; /* Tx emphasis taps post-cursor */
};

/* Per-PHY driver state */
struct xgene_phy_ctx {
	struct device *dev;
	struct phy *phy;
	enum phy_mode mode;		/* Mode of operation */
	enum clk_type_t clk_type;	/* Input clock selection */
	void __iomem *sds_base;		/* PHY CSR base addr */
	struct clk *clk;		/* Optional clock */
	/* Override Serdes parameters */
	struct xgene_sata_override_param sata_param;
};
/*
 * For chip earlier than A3 version, enable this flag.
 * To enable, pass boot argument phy_xgene.preA3Chip=1
 * Read-only at runtime (perms 0444); consulted throughout the calibration
 * paths below to select pre-A3 register values and manual calibration.
 */
static int preA3Chip;
MODULE_PARM_DESC(preA3Chip, "Enable pre-A3 chip support (1=enable 0=disable)");
module_param_named(preA3Chip, preA3Chip, int, 0444);
  543. static void sds_wr(void __iomem *csr_base, u32 indirect_cmd_reg,
  544. u32 indirect_data_reg, u32 addr, u32 data)
  545. {
  546. unsigned long deadline = jiffies + HZ;
  547. u32 val;
  548. u32 cmd;
  549. cmd = CFG_IND_WR_CMD_MASK | CFG_IND_CMD_DONE_MASK;
  550. cmd = CFG_IND_ADDR_SET(cmd, addr);
  551. writel(data, csr_base + indirect_data_reg);
  552. readl(csr_base + indirect_data_reg); /* Force a barrier */
  553. writel(cmd, csr_base + indirect_cmd_reg);
  554. readl(csr_base + indirect_cmd_reg); /* Force a barrier */
  555. do {
  556. val = readl(csr_base + indirect_cmd_reg);
  557. } while (!(val & CFG_IND_CMD_DONE_MASK) &&
  558. time_before(jiffies, deadline));
  559. if (!(val & CFG_IND_CMD_DONE_MASK))
  560. pr_err("SDS WR timeout at 0x%p offset 0x%08X value 0x%08X\n",
  561. csr_base + indirect_cmd_reg, addr, data);
  562. }
  563. static void sds_rd(void __iomem *csr_base, u32 indirect_cmd_reg,
  564. u32 indirect_data_reg, u32 addr, u32 *data)
  565. {
  566. unsigned long deadline = jiffies + HZ;
  567. u32 val;
  568. u32 cmd;
  569. cmd = CFG_IND_RD_CMD_MASK | CFG_IND_CMD_DONE_MASK;
  570. cmd = CFG_IND_ADDR_SET(cmd, addr);
  571. writel(cmd, csr_base + indirect_cmd_reg);
  572. readl(csr_base + indirect_cmd_reg); /* Force a barrier */
  573. do {
  574. val = readl(csr_base + indirect_cmd_reg);
  575. } while (!(val & CFG_IND_CMD_DONE_MASK) &&
  576. time_before(jiffies, deadline));
  577. *data = readl(csr_base + indirect_data_reg);
  578. if (!(val & CFG_IND_CMD_DONE_MASK))
  579. pr_err("SDS WR timeout at 0x%p offset 0x%08X value 0x%08X\n",
  580. csr_base + indirect_cmd_reg, addr, *data);
  581. }
  582. static void cmu_wr(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
  583. u32 reg, u32 data)
  584. {
  585. void __iomem *sds_base = ctx->sds_base;
  586. u32 val;
  587. if (cmu_type == REF_CMU)
  588. reg += SERDES_PLL_REF_INDIRECT_OFFSET;
  589. else
  590. reg += SERDES_PLL_INDIRECT_OFFSET;
  591. sds_wr(sds_base, SATA_ENET_SDS_IND_CMD_REG,
  592. SATA_ENET_SDS_IND_WDATA_REG, reg, data);
  593. sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
  594. SATA_ENET_SDS_IND_RDATA_REG, reg, &val);
  595. pr_debug("CMU WR addr 0x%X value 0x%08X <-> 0x%08X\n", reg, data, val);
  596. }
  597. static void cmu_rd(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
  598. u32 reg, u32 *data)
  599. {
  600. void __iomem *sds_base = ctx->sds_base;
  601. if (cmu_type == REF_CMU)
  602. reg += SERDES_PLL_REF_INDIRECT_OFFSET;
  603. else
  604. reg += SERDES_PLL_INDIRECT_OFFSET;
  605. sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
  606. SATA_ENET_SDS_IND_RDATA_REG, reg, data);
  607. pr_debug("CMU RD addr 0x%X value 0x%08X\n", reg, *data);
  608. }
  609. static void cmu_toggle1to0(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
  610. u32 reg, u32 bits)
  611. {
  612. u32 val;
  613. cmu_rd(ctx, cmu_type, reg, &val);
  614. val |= bits;
  615. cmu_wr(ctx, cmu_type, reg, val);
  616. cmu_rd(ctx, cmu_type, reg, &val);
  617. val &= ~bits;
  618. cmu_wr(ctx, cmu_type, reg, val);
  619. }
  620. static void cmu_clrbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
  621. u32 reg, u32 bits)
  622. {
  623. u32 val;
  624. cmu_rd(ctx, cmu_type, reg, &val);
  625. val &= ~bits;
  626. cmu_wr(ctx, cmu_type, reg, val);
  627. }
  628. static void cmu_setbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
  629. u32 reg, u32 bits)
  630. {
  631. u32 val;
  632. cmu_rd(ctx, cmu_type, reg, &val);
  633. val |= bits;
  634. cmu_wr(ctx, cmu_type, reg, val);
  635. }
  636. static void serdes_wr(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 data)
  637. {
  638. void __iomem *sds_base = ctx->sds_base;
  639. u32 val;
  640. reg += SERDES_INDIRECT_OFFSET;
  641. reg += lane * SERDES_LANE_STRIDE;
  642. sds_wr(sds_base, SATA_ENET_SDS_IND_CMD_REG,
  643. SATA_ENET_SDS_IND_WDATA_REG, reg, data);
  644. sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
  645. SATA_ENET_SDS_IND_RDATA_REG, reg, &val);
  646. pr_debug("SERDES WR addr 0x%X value 0x%08X <-> 0x%08X\n", reg, data,
  647. val);
  648. }
  649. static void serdes_rd(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 *data)
  650. {
  651. void __iomem *sds_base = ctx->sds_base;
  652. reg += SERDES_INDIRECT_OFFSET;
  653. reg += lane * SERDES_LANE_STRIDE;
  654. sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG,
  655. SATA_ENET_SDS_IND_RDATA_REG, reg, data);
  656. pr_debug("SERDES RD addr 0x%X value 0x%08X\n", reg, *data);
  657. }
  658. static void serdes_clrbits(struct xgene_phy_ctx *ctx, int lane, u32 reg,
  659. u32 bits)
  660. {
  661. u32 val;
  662. serdes_rd(ctx, lane, reg, &val);
  663. val &= ~bits;
  664. serdes_wr(ctx, lane, reg, val);
  665. }
  666. static void serdes_setbits(struct xgene_phy_ctx *ctx, int lane, u32 reg,
  667. u32 bits)
  668. {
  669. u32 val;
  670. serdes_rd(ctx, lane, reg, &val);
  671. val |= bits;
  672. serdes_wr(ctx, lane, reg, val);
  673. }
  674. static void xgene_phy_cfg_cmu_clk_type(struct xgene_phy_ctx *ctx,
  675. enum cmu_type_t cmu_type,
  676. enum clk_type_t clk_type)
  677. {
  678. u32 val;
  679. /* Set the reset sequence delay for TX ready assertion */
  680. cmu_rd(ctx, cmu_type, CMU_REG12, &val);
  681. val = CMU_REG12_STATE_DELAY9_SET(val, 0x1);
  682. cmu_wr(ctx, cmu_type, CMU_REG12, val);
  683. /* Set the programmable stage delays between various enable stages */
  684. cmu_wr(ctx, cmu_type, CMU_REG13, 0x0222);
  685. cmu_wr(ctx, cmu_type, CMU_REG14, 0x2225);
  686. /* Configure clock type */
  687. if (clk_type == CLK_EXT_DIFF) {
  688. /* Select external clock mux */
  689. cmu_rd(ctx, cmu_type, CMU_REG0, &val);
  690. val = CMU_REG0_PLL_REF_SEL_SET(val, 0x0);
  691. cmu_wr(ctx, cmu_type, CMU_REG0, val);
  692. /* Select CMOS as reference clock */
  693. cmu_rd(ctx, cmu_type, CMU_REG1, &val);
  694. val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x0);
  695. cmu_wr(ctx, cmu_type, CMU_REG1, val);
  696. dev_dbg(ctx->dev, "Set external reference clock\n");
  697. } else if (clk_type == CLK_INT_DIFF) {
  698. /* Select internal clock mux */
  699. cmu_rd(ctx, cmu_type, CMU_REG0, &val);
  700. val = CMU_REG0_PLL_REF_SEL_SET(val, 0x1);
  701. cmu_wr(ctx, cmu_type, CMU_REG0, val);
  702. /* Select CMOS as reference clock */
  703. cmu_rd(ctx, cmu_type, CMU_REG1, &val);
  704. val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x1);
  705. cmu_wr(ctx, cmu_type, CMU_REG1, val);
  706. dev_dbg(ctx->dev, "Set internal reference clock\n");
  707. } else if (clk_type == CLK_INT_SING) {
  708. /*
  709. * NOTE: This clock type is NOT support for controller
  710. * whose internal clock shared in the PCIe controller
  711. *
  712. * Select internal clock mux
  713. */
  714. cmu_rd(ctx, cmu_type, CMU_REG1, &val);
  715. val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x1);
  716. cmu_wr(ctx, cmu_type, CMU_REG1, val);
  717. /* Select CML as reference clock */
  718. cmu_rd(ctx, cmu_type, CMU_REG1, &val);
  719. val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x0);
  720. cmu_wr(ctx, cmu_type, CMU_REG1, val);
  721. dev_dbg(ctx->dev,
  722. "Set internal single ended reference clock\n");
  723. }
  724. }
/*
 * Configure the clock macro (PLL/VCO) core for SATA operation.
 * Most values branch on (cmu_type == REF_CMU || preA3Chip); the PHY CMU on
 * A3+ silicon gets the "else" values. Register write order follows the PHY
 * design sequence and must not be reordered.
 */
static void xgene_phy_sata_cfg_cmu_core(struct xgene_phy_ctx *ctx,
					enum cmu_type_t cmu_type,
					enum clk_type_t clk_type)
{
	u32 val;
	int ref_100MHz;

	if (cmu_type == REF_CMU) {
		/* Set VCO calibration voltage threshold */
		cmu_rd(ctx, cmu_type, CMU_REG34, &val);
		val = CMU_REG34_VCO_CAL_VTH_LO_MAX_SET(val, 0x7);
		val = CMU_REG34_VCO_CAL_VTH_HI_MAX_SET(val, 0xc);
		val = CMU_REG34_VCO_CAL_VTH_LO_MIN_SET(val, 0x3);
		val = CMU_REG34_VCO_CAL_VTH_HI_MIN_SET(val, 0x8);
		cmu_wr(ctx, cmu_type, CMU_REG34, val);
	}
	/* Set the VCO calibration counter */
	cmu_rd(ctx, cmu_type, CMU_REG0, &val);
	if (cmu_type == REF_CMU || preA3Chip)
		val = CMU_REG0_CAL_COUNT_RESOL_SET(val, 0x4);
	else
		val = CMU_REG0_CAL_COUNT_RESOL_SET(val, 0x7);
	cmu_wr(ctx, cmu_type, CMU_REG0, val);
	/* Configure PLL for calibration */
	cmu_rd(ctx, cmu_type, CMU_REG1, &val);
	val = CMU_REG1_PLL_CP_SET(val, 0x1);
	if (cmu_type == REF_CMU || preA3Chip)
		val = CMU_REG1_PLL_CP_SEL_SET(val, 0x5);
	else
		val = CMU_REG1_PLL_CP_SEL_SET(val, 0x3);
	/* Manual calibration only for the PHY CMU */
	if (cmu_type == REF_CMU)
		val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x0);
	else
		val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x1);
	cmu_wr(ctx, cmu_type, CMU_REG1, val);
	/* Hold the PHY PLL in reset while it is being configured */
	if (cmu_type != REF_CMU)
		cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
	/* Configure the PLL for either 100MHz or 50MHz */
	cmu_rd(ctx, cmu_type, CMU_REG2, &val);
	if (cmu_type == REF_CMU) {
		val = CMU_REG2_PLL_LFRES_SET(val, 0xa);
		ref_100MHz = 1;
	} else {
		val = CMU_REG2_PLL_LFRES_SET(val, 0x3);
		/* External differential input runs at 50MHz here */
		if (clk_type == CLK_EXT_DIFF)
			ref_100MHz = 0;
		else
			ref_100MHz = 1;
	}
	if (ref_100MHz) {
		val = CMU_REG2_PLL_FBDIV_SET(val, FBDIV_VAL_100M);
		val = CMU_REG2_PLL_REFDIV_SET(val, REFDIV_VAL_100M);
	} else {
		val = CMU_REG2_PLL_FBDIV_SET(val, FBDIV_VAL_50M);
		val = CMU_REG2_PLL_REFDIV_SET(val, REFDIV_VAL_50M);
	}
	cmu_wr(ctx, cmu_type, CMU_REG2, val);
	/* Configure the VCO */
	cmu_rd(ctx, cmu_type, CMU_REG3, &val);
	if (cmu_type == REF_CMU) {
		val = CMU_REG3_VCOVARSEL_SET(val, 0x3);
		val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x10);
	} else {
		val = CMU_REG3_VCOVARSEL_SET(val, 0xF);
		if (preA3Chip)
			val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x15);
		else
			val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x1a);
		val = CMU_REG3_VCO_MANMOMSEL_SET(val, 0x15);
	}
	cmu_wr(ctx, cmu_type, CMU_REG3, val);
	/* Disable force PLL lock */
	cmu_rd(ctx, cmu_type, CMU_REG26, &val);
	val = CMU_REG26_FORCE_PLL_LOCK_SET(val, 0x0);
	cmu_wr(ctx, cmu_type, CMU_REG26, val);
	/* Setup PLL loop filter */
	cmu_rd(ctx, cmu_type, CMU_REG5, &val);
	val = CMU_REG5_PLL_LFSMCAP_SET(val, 0x3);
	val = CMU_REG5_PLL_LFCAP_SET(val, 0x3);
	if (cmu_type == REF_CMU || !preA3Chip)
		val = CMU_REG5_PLL_LOCK_RESOLUTION_SET(val, 0x7);
	else
		val = CMU_REG5_PLL_LOCK_RESOLUTION_SET(val, 0x4);
	cmu_wr(ctx, cmu_type, CMU_REG5, val);
	/* Enable or disable manual calibration */
	cmu_rd(ctx, cmu_type, CMU_REG6, &val);
	val = CMU_REG6_PLL_VREGTRIM_SET(val, preA3Chip ? 0x0 : 0x2);
	val = CMU_REG6_MAN_PVT_CAL_SET(val, preA3Chip ? 0x1 : 0x0);
	cmu_wr(ctx, cmu_type, CMU_REG6, val);
	/* Configure lane for 20-bits */
	if (cmu_type == PHY_CMU) {
		cmu_rd(ctx, cmu_type, CMU_REG9, &val);
		val = CMU_REG9_TX_WORD_MODE_CH1_SET(val,
						    CMU_REG9_WORD_LEN_20BIT);
		val = CMU_REG9_TX_WORD_MODE_CH0_SET(val,
						    CMU_REG9_WORD_LEN_20BIT);
		val = CMU_REG9_PLL_POST_DIVBY2_SET(val, 0x1);
		if (!preA3Chip) {
			val = CMU_REG9_VBG_BYPASSB_SET(val, 0x0);
			val = CMU_REG9_IGEN_BYPASS_SET(val , 0x0);
		}
		cmu_wr(ctx, cmu_type, CMU_REG9, val);
		if (!preA3Chip) {
			cmu_rd(ctx, cmu_type, CMU_REG10, &val);
			val = CMU_REG10_VREG_REFSEL_SET(val, 0x1);
			cmu_wr(ctx, cmu_type, CMU_REG10, val);
		}
	}
	/* Override calibration-done / PLL-lock and set VCO cal wait time */
	cmu_rd(ctx, cmu_type, CMU_REG16, &val);
	val = CMU_REG16_CALIBRATION_DONE_OVERRIDE_SET(val, 0x1);
	val = CMU_REG16_BYPASS_PLL_LOCK_SET(val, 0x1);
	if (cmu_type == REF_CMU || preA3Chip)
		val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x4);
	else
		val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x7);
	cmu_wr(ctx, cmu_type, CMU_REG16, val);
	/* Configure for SATA */
	cmu_rd(ctx, cmu_type, CMU_REG30, &val);
	val = CMU_REG30_PCIE_MODE_SET(val, 0x0);
	val = CMU_REG30_LOCK_COUNT_SET(val, 0x3);
	cmu_wr(ctx, cmu_type, CMU_REG30, val);
	/* Disable state machine bypass */
	cmu_wr(ctx, cmu_type, CMU_REG31, 0xF);
	cmu_rd(ctx, cmu_type, CMU_REG32, &val);
	val = CMU_REG32_PVT_CAL_WAIT_SEL_SET(val, 0x3);
	if (cmu_type == REF_CMU || preA3Chip)
		val = CMU_REG32_IREF_ADJ_SET(val, 0x3);
	else
		val = CMU_REG32_IREF_ADJ_SET(val, 0x1);
	cmu_wr(ctx, cmu_type, CMU_REG32, val);
	/* Set VCO calibration threshold */
	if (cmu_type != REF_CMU && preA3Chip)
		cmu_wr(ctx, cmu_type, CMU_REG34, 0x8d27);
	else
		cmu_wr(ctx, cmu_type, CMU_REG34, 0x873c);
	/* Set CTLE Override and override waiting from state machine */
	cmu_wr(ctx, cmu_type, CMU_REG37, 0xF00F);
}
/*
 * Enable spread-spectrum clocking on the given clock macro: program the
 * SSC modulation, vertical step and DSM selection, then pulse the PLL
 * reset and re-trigger VCO calibration so the new settings take effect.
 */
static void xgene_phy_ssc_enable(struct xgene_phy_ctx *ctx,
				 enum cmu_type_t cmu_type)
{
	u32 val;

	/* Set SSC modulation value */
	cmu_rd(ctx, cmu_type, CMU_REG35, &val);
	val = CMU_REG35_PLL_SSC_MOD_SET(val, 98);
	cmu_wr(ctx, cmu_type, CMU_REG35, val);
	/* Enable SSC, set vertical step and DSM value */
	cmu_rd(ctx, cmu_type, CMU_REG36, &val);
	val = CMU_REG36_PLL_SSC_VSTEP_SET(val, 30);
	val = CMU_REG36_PLL_SSC_EN_SET(val, 1);
	val = CMU_REG36_PLL_SSC_DSMSEL_SET(val, 1);
	cmu_wr(ctx, cmu_type, CMU_REG36, val);
	/* Reset the PLL (active-low reset: clear then set) */
	cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
	cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
	/* Force VCO calibration to restart */
	cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
		       CMU_REG32_FORCE_VCOCAL_START_MASK);
}
/*
 * Configure every SerDes lane for SATA: CTLE boost, Tx emphasis/amplitude
 * and eye tuning (per-lane, per-speed from ctx->sata_param, indexed
 * [lane * 3 + speed]), 20-bit word mode, CDR/LOS, DFE and latch
 * calibration defaults. Register write order follows the PHY design
 * sequence.
 */
static void xgene_phy_sata_cfg_lanes(struct xgene_phy_ctx *ctx)
{
	u32 val;
	u32 reg;
	int i;
	int lane;

	for (lane = 0; lane < MAX_LANE; lane++) {
		serdes_wr(ctx, lane, RXTX_REG147, 0x6);
		/* Set boost control for quarter, half, and full rate */
		serdes_rd(ctx, lane, RXTX_REG0, &val);
		val = RXTX_REG0_CTLE_EQ_HR_SET(val, 0x10);
		val = RXTX_REG0_CTLE_EQ_QR_SET(val, 0x10);
		val = RXTX_REG0_CTLE_EQ_FR_SET(val, 0x10);
		serdes_wr(ctx, lane, RXTX_REG0, val);
		/* Set boost control value */
		serdes_rd(ctx, lane, RXTX_REG1, &val);
		val = RXTX_REG1_RXACVCM_SET(val, 0x7);
		val = RXTX_REG1_CTLE_EQ_SET(val,
			ctx->sata_param.txboostgain[lane * 3 +
			ctx->sata_param.speed[lane]]);
		serdes_wr(ctx, lane, RXTX_REG1, val);
		/* Latch VTT value based on the termination to ground and
		   enable TX FIFO */
		serdes_rd(ctx, lane, RXTX_REG2, &val);
		val = RXTX_REG2_VTT_ENA_SET(val, 0x1);
		val = RXTX_REG2_VTT_SEL_SET(val, 0x1);
		val = RXTX_REG2_TX_FIFO_ENA_SET(val, 0x1);
		serdes_wr(ctx, lane, RXTX_REG2, val);
		/* Configure Tx for 20-bits */
		serdes_rd(ctx, lane, RXTX_REG4, &val);
		val = RXTX_REG4_TX_WORD_MODE_SET(val, CMU_REG9_WORD_LEN_20BIT);
		serdes_wr(ctx, lane, RXTX_REG4, val);
		/* A3+ silicon needs the Rx regulator trimmed */
		if (!preA3Chip) {
			serdes_rd(ctx, lane, RXTX_REG1, &val);
			val = RXTX_REG1_RXVREG1_SET(val, 0x2);
			val = RXTX_REG1_RXIREF_ADJ_SET(val, 0x2);
			serdes_wr(ctx, lane, RXTX_REG1, val);
		}
		/* Set pre-emphasis first 1 and 2, and post-emphasis values */
		serdes_rd(ctx, lane, RXTX_REG5, &val);
		val = RXTX_REG5_TX_CN1_SET(val,
			ctx->sata_param.txprecursor_cn1[lane * 3 +
			ctx->sata_param.speed[lane]]);
		val = RXTX_REG5_TX_CP1_SET(val,
			ctx->sata_param.txpostcursor_cp1[lane * 3 +
			ctx->sata_param.speed[lane]]);
		val = RXTX_REG5_TX_CN2_SET(val,
			ctx->sata_param.txprecursor_cn2[lane * 3 +
			ctx->sata_param.speed[lane]]);
		serdes_wr(ctx, lane, RXTX_REG5, val);
		/* Set TX amplitude value */
		serdes_rd(ctx, lane, RXTX_REG6, &val);
		val = RXTX_REG6_TXAMP_CNTL_SET(val,
			ctx->sata_param.txamplitude[lane * 3 +
			ctx->sata_param.speed[lane]]);
		val = RXTX_REG6_TXAMP_ENA_SET(val, 0x1);
		val = RXTX_REG6_TX_IDLE_SET(val, 0x0);
		val = RXTX_REG6_RX_BIST_RESYNC_SET(val, 0x0);
		val = RXTX_REG6_RX_BIST_ERRCNT_RD_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG6, val);
		/* Configure Rx for 20-bits */
		serdes_rd(ctx, lane, RXTX_REG7, &val);
		val = RXTX_REG7_BIST_ENA_RX_SET(val, 0x0);
		val = RXTX_REG7_RX_WORD_MODE_SET(val, CMU_REG9_WORD_LEN_20BIT);
		serdes_wr(ctx, lane, RXTX_REG7, val);
		/* Set CDR and LOS values and enable Rx SSC */
		serdes_rd(ctx, lane, RXTX_REG8, &val);
		val = RXTX_REG8_CDR_LOOP_ENA_SET(val, 0x1);
		val = RXTX_REG8_CDR_BYPASS_RXLOS_SET(val, 0x0);
		val = RXTX_REG8_SSC_ENABLE_SET(val, 0x1);
		val = RXTX_REG8_SD_DISABLE_SET(val, 0x0);
		val = RXTX_REG8_SD_VREF_SET(val, 0x4);
		serdes_wr(ctx, lane, RXTX_REG8, val);
		/* Set phase adjust upper/lower limits */
		serdes_rd(ctx, lane, RXTX_REG11, &val);
		val = RXTX_REG11_PHASE_ADJUST_LIMIT_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG11, val);
		/* Enable Latch Off; disable SUMOS and Tx termination */
		serdes_rd(ctx, lane, RXTX_REG12, &val);
		val = RXTX_REG12_LATCH_OFF_ENA_SET(val, 0x1);
		val = RXTX_REG12_SUMOS_ENABLE_SET(val, 0x0);
		val = RXTX_REG12_RX_DET_TERM_ENABLE_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG12, val);
		/* Set period error latch to 512T and enable BWL */
		serdes_rd(ctx, lane, RXTX_REG26, &val);
		val = RXTX_REG26_PERIOD_ERROR_LATCH_SET(val, 0x0);
		val = RXTX_REG26_BLWC_ENA_SET(val, 0x1);
		serdes_wr(ctx, lane, RXTX_REG26, val);
		/* Turn DFE off for now; summer/latch cal will set it later */
		serdes_wr(ctx, lane, RXTX_REG28, 0x0);
		/* Set DFE loop preset value */
		serdes_wr(ctx, lane, RXTX_REG31, 0x0);
		/* Set Eye Monitor counter width to 12-bit */
		serdes_rd(ctx, lane, RXTX_REG61, &val);
		val = RXTX_REG61_ISCAN_INBERT_SET(val, 0x1);
		val = RXTX_REG61_LOADFREQ_SHIFT_SET(val, 0x0);
		val = RXTX_REG61_EYE_COUNT_WIDTH_SEL_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG61, val);
		serdes_rd(ctx, lane, RXTX_REG62, &val);
		val = RXTX_REG62_PERIOD_H1_QLATCH_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG62, val);
		/*
		 * Set BW select tap X for DFE loop.
		 * NOTE(review): the REG89 field macros are applied to every
		 * register in the REG81..REG97 window — presumably all share
		 * the same field layout; confirm against the PHY datasheet.
		 */
		for (i = 0; i < 9; i++) {
			reg = RXTX_REG81 + i * 2;
			serdes_rd(ctx, lane, reg, &val);
			val = RXTX_REG89_MU_TH7_SET(val, 0xe);
			val = RXTX_REG89_MU_TH8_SET(val, 0xe);
			val = RXTX_REG89_MU_TH9_SET(val, 0xe);
			serdes_wr(ctx, lane, reg, val);
		}
		/* Set BW select tap X for frequency adjust loop */
		for (i = 0; i < 3; i++) {
			reg = RXTX_REG96 + i * 2;
			serdes_rd(ctx, lane, reg, &val);
			val = RXTX_REG96_MU_FREQ1_SET(val, 0x10);
			val = RXTX_REG96_MU_FREQ2_SET(val, 0x10);
			val = RXTX_REG96_MU_FREQ3_SET(val, 0x10);
			serdes_wr(ctx, lane, reg, val);
		}
		/* Set BW select tap X for phase adjust loop */
		for (i = 0; i < 3; i++) {
			reg = RXTX_REG99 + i * 2;
			serdes_rd(ctx, lane, reg, &val);
			val = RXTX_REG99_MU_PHASE1_SET(val, 0x7);
			val = RXTX_REG99_MU_PHASE2_SET(val, 0x7);
			val = RXTX_REG99_MU_PHASE3_SET(val, 0x7);
			serdes_wr(ctx, lane, reg, val);
		}
		serdes_rd(ctx, lane, RXTX_REG102, &val);
		val = RXTX_REG102_FREQLOOP_LIMIT_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG102, val);
		serdes_wr(ctx, lane, RXTX_REG114, 0xffe0);
		/* Per-lane, per-speed Tx eye tuning */
		serdes_rd(ctx, lane, RXTX_REG125, &val);
		val = RXTX_REG125_SIGN_PQ_SET(val,
			ctx->sata_param.txeyedirection[lane * 3 +
			ctx->sata_param.speed[lane]]);
		val = RXTX_REG125_PQ_REG_SET(val,
			ctx->sata_param.txeyetuning[lane * 3 +
			ctx->sata_param.speed[lane]]);
		val = RXTX_REG125_PHZ_MANUAL_SET(val, 0x1);
		serdes_wr(ctx, lane, RXTX_REG125, val);
		serdes_rd(ctx, lane, RXTX_REG127, &val);
		val = RXTX_REG127_LATCH_MAN_CAL_ENA_SET(val, 0x0);
		serdes_wr(ctx, lane, RXTX_REG127, val);
		serdes_rd(ctx, lane, RXTX_REG128, &val);
		val = RXTX_REG128_LATCH_CAL_WAIT_SEL_SET(val, 0x3);
		serdes_wr(ctx, lane, RXTX_REG128, val);
		/* Rx eye scan only on pre-A3 silicon */
		serdes_rd(ctx, lane, RXTX_REG145, &val);
		val = RXTX_REG145_RXDFE_CONFIG_SET(val, 0x3);
		val = RXTX_REG145_TX_IDLE_SATA_SET(val, 0x0);
		if (preA3Chip) {
			val = RXTX_REG145_RXES_ENA_SET(val, 0x1);
			val = RXTX_REG145_RXVWES_LATENA_SET(val, 0x1);
		} else {
			val = RXTX_REG145_RXES_ENA_SET(val, 0x0);
			val = RXTX_REG145_RXVWES_LATENA_SET(val, 0x0);
		}
		serdes_wr(ctx, lane, RXTX_REG145, val);
		/*
		 * Set Rx LOS filter clock rate, sample rate, and threshold
		 * windows
		 */
		for (i = 0; i < 4; i++) {
			reg = RXTX_REG148 + i * 2;
			serdes_wr(ctx, lane, reg, 0xFFFF);
		}
	}
}
/*
 * Release the PHY from reset, run the (pre-A3 only) manual PVT/termination
 * calibration, then poll for PLL calibration completion.
 *
 * Return: 0 on success, -1 when the VCO calibration failed. A calibration
 * timeout alone is only logged; the VCO-fail flag decides the result.
 */
static int xgene_phy_cal_rdy_chk(struct xgene_phy_ctx *ctx,
				 enum cmu_type_t cmu_type,
				 enum clk_type_t clk_type)
{
	void __iomem *csr_serdes = ctx->sds_base;
	int loop;
	u32 val;

	/* Release PHY main reset */
	writel(0xdf, csr_serdes + SATA_ENET_SDS_RST_CTL);
	readl(csr_serdes + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
	if (cmu_type != REF_CMU) {
		cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK);
		/*
		 * As per PHY design spec, the PLL reset requires a minimum
		 * of 800us.
		 */
		usleep_range(800, 1000);
		cmu_rd(ctx, cmu_type, CMU_REG1, &val);
		val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x0);
		cmu_wr(ctx, cmu_type, CMU_REG1, val);
		/*
		 * As per PHY design spec, the PLL auto calibration requires
		 * a minimum of 800us.
		 */
		usleep_range(800, 1000);
		cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
			       CMU_REG32_FORCE_VCOCAL_START_MASK);
		/*
		 * As per PHY design spec, the PLL requires a minimum of
		 * 800us to settle.
		 */
		usleep_range(800, 1000);
	}
	/* Manual calibration is only needed on pre-A3 silicon */
	if (!preA3Chip)
		goto skip_manual_cal;
	/*
	 * Configure the termination resistor calibration
	 * The serial receive pins, RXP/RXN, have TERMination resistor
	 * that is required to be calibrated.
	 */
	cmu_rd(ctx, cmu_type, CMU_REG17, &val);
	val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x12);
	val = CMU_REG17_RESERVED_7_SET(val, 0x0);
	cmu_wr(ctx, cmu_type, CMU_REG17, val);
	cmu_toggle1to0(ctx, cmu_type, CMU_REG17,
		       CMU_REG17_PVT_TERM_MAN_ENA_MASK);
	/*
	 * The serial transmit pins, TXP/TXN, have Pull-UP and Pull-DOWN
	 * resistors that are required to be calibrated.
	 * Configure the pull DOWN calibration
	 */
	cmu_rd(ctx, cmu_type, CMU_REG17, &val);
	val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x29);
	val = CMU_REG17_RESERVED_7_SET(val, 0x0);
	cmu_wr(ctx, cmu_type, CMU_REG17, val);
	cmu_toggle1to0(ctx, cmu_type, CMU_REG16,
		       CMU_REG16_PVT_DN_MAN_ENA_MASK);
	/* Configure the pull UP calibration */
	cmu_rd(ctx, cmu_type, CMU_REG17, &val);
	val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x28);
	val = CMU_REG17_RESERVED_7_SET(val, 0x0);
	cmu_wr(ctx, cmu_type, CMU_REG17, val);
	cmu_toggle1to0(ctx, cmu_type, CMU_REG16,
		       CMU_REG16_PVT_UP_MAN_ENA_MASK);
skip_manual_cal:
	/* Poll the PLL calibration completion status for at least 1 ms */
	loop = 100;
	do {
		cmu_rd(ctx, cmu_type, CMU_REG7, &val);
		if (CMU_REG7_PLL_CALIB_DONE_RD(val))
			break;
		/*
		 * As per PHY design spec, PLL calibration status requires
		 * a minimum of 10us to be updated.
		 */
		usleep_range(10, 100);
	} while (--loop > 0);
	cmu_rd(ctx, cmu_type, CMU_REG7, &val);
	dev_dbg(ctx->dev, "PLL calibration %s\n",
		CMU_REG7_PLL_CALIB_DONE_RD(val) ? "done" : "failed");
	if (CMU_REG7_VCO_CAL_FAIL_RD(val)) {
		dev_err(ctx->dev,
			"PLL calibration failed due to VCO failure\n");
		return -1;
	}
	dev_dbg(ctx->dev, "PLL calibration successful\n");
	cmu_rd(ctx, cmu_type, CMU_REG15, &val);
	/* Bits 9:8 of CMU_REG15 report per-channel Tx ready */
	dev_dbg(ctx->dev, "PHY Tx is %sready\n", val & 0x300 ? "" : "not ");
	return 0;
}
  1140. static void xgene_phy_pdwn_force_vco(struct xgene_phy_ctx *ctx,
  1141. enum cmu_type_t cmu_type,
  1142. enum clk_type_t clk_type)
  1143. {
  1144. u32 val;
  1145. dev_dbg(ctx->dev, "Reset VCO and re-start again\n");
  1146. if (cmu_type == PHY_CMU) {
  1147. cmu_rd(ctx, cmu_type, CMU_REG16, &val);
  1148. val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x7);
  1149. cmu_wr(ctx, cmu_type, CMU_REG16, val);
  1150. }
  1151. cmu_toggle1to0(ctx, cmu_type, CMU_REG0, CMU_REG0_PDOWN_MASK);
  1152. cmu_toggle1to0(ctx, cmu_type, CMU_REG32,
  1153. CMU_REG32_FORCE_VCOCAL_START_MASK);
  1154. }
/*
 * Full SATA bring-up: sequence the PHY out of reset, select operating
 * speed and SATA pin mode, configure the clock macro and all lanes, then
 * run PLL calibration with retries.
 *
 * Return: always 0 — calibration failure is logged but not fatal.
 */
static int xgene_phy_hw_init_sata(struct xgene_phy_ctx *ctx,
				  enum clk_type_t clk_type, int ssc_enable)
{
	void __iomem *sds_base = ctx->sds_base;
	u32 val;
	int i;

	/* Configure the PHY for operation */
	dev_dbg(ctx->dev, "Reset PHY\n");
	/* Place PHY into reset */
	writel(0x0, sds_base + SATA_ENET_SDS_RST_CTL);
	val = readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
	/* Release PHY lane from reset (active high) */
	writel(0x20, sds_base + SATA_ENET_SDS_RST_CTL);
	readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
	/* Release all PHY module out of reset except PHY main reset */
	writel(0xde, sds_base + SATA_ENET_SDS_RST_CTL);
	readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */
	/* Set the operation speed (lane 0's speed index selects the rate) */
	val = readl(sds_base + SATA_ENET_SDS_CTL1);
	val = CFG_I_SPD_SEL_CDR_OVR1_SET(val,
		ctx->sata_param.txspeed[ctx->sata_param.speed[0]]);
	writel(val, sds_base + SATA_ENET_SDS_CTL1);
	dev_dbg(ctx->dev, "Set the customer pin mode to SATA\n");
	val = readl(sds_base + SATA_ENET_SDS_CTL0);
	val = REGSPEC_CFG_I_CUSTOMER_PIN_MODE0_SET(val, 0x4421);
	writel(val, sds_base + SATA_ENET_SDS_CTL0);
	/* Configure the clock macro unit (CMU) clock type */
	xgene_phy_cfg_cmu_clk_type(ctx, PHY_CMU, clk_type);
	/* Configure the clock macro */
	xgene_phy_sata_cfg_cmu_core(ctx, PHY_CMU, clk_type);
	/* Enable SSC if enabled */
	if (ssc_enable)
		xgene_phy_ssc_enable(ctx, PHY_CMU);
	/* Configure PHY lanes */
	xgene_phy_sata_cfg_lanes(ctx);
	/* Set Rx/Tx 20-bit */
	val = readl(sds_base + SATA_ENET_SDS_PCS_CTL0);
	val = REGSPEC_CFG_I_RX_WORDMODE0_SET(val, 0x3);
	val = REGSPEC_CFG_I_TX_WORDMODE0_SET(val, 0x3);
	writel(val, sds_base + SATA_ENET_SDS_PCS_CTL0);
	/* Start PLL calibration; retry up to 10 times on failure */
	i = 10;
	do {
		if (!xgene_phy_cal_rdy_chk(ctx, PHY_CMU, clk_type))
			break;
		/* If failed, toggle the VCO power signal and start again */
		xgene_phy_pdwn_force_vco(ctx, PHY_CMU, clk_type);
	} while (--i > 0);
	/* Even on failure, allow to continue any way */
	if (i <= 0)
		dev_err(ctx->dev, "PLL calibration failed\n");
	return 0;
}
  1208. static int xgene_phy_hw_initialize(struct xgene_phy_ctx *ctx,
  1209. enum clk_type_t clk_type,
  1210. int ssc_enable)
  1211. {
  1212. int rc;
  1213. dev_dbg(ctx->dev, "PHY init clk type %d\n", clk_type);
  1214. if (ctx->mode == MODE_SATA) {
  1215. rc = xgene_phy_hw_init_sata(ctx, clk_type, ssc_enable);
  1216. if (rc)
  1217. return rc;
  1218. } else {
  1219. dev_err(ctx->dev, "Un-supported customer pin mode %d\n",
  1220. ctx->mode);
  1221. return -ENODEV;
  1222. }
  1223. return 0;
  1224. }
/*
 * Receiver Offset Calibration:
 *
 * Calibrate the receiver signal path offset in two steps - summer and
 * latch calibrations.
 */
/*
 * Force one pass of the receiver summer and latch offset calibrations on
 * @lane, then restore the lane's DFE/loopback registers from the fixed
 * table below. Each calibration pulse is followed by the minimum settle
 * delay from the PHY design spec.
 */
static void xgene_phy_force_lat_summer_cal(struct xgene_phy_ctx *ctx, int lane)
{
	int i;
	/* Post-calibration register restore table (reg, value) */
	struct {
		u32 reg;
		u32 val;
	} serdes_reg[] = {
		{RXTX_REG38, 0x0},
		{RXTX_REG39, 0xff00},
		{RXTX_REG40, 0xffff},
		{RXTX_REG41, 0xffff},
		{RXTX_REG42, 0xffff},
		{RXTX_REG43, 0xffff},
		{RXTX_REG44, 0xffff},
		{RXTX_REG45, 0xffff},
		{RXTX_REG46, 0xffff},
		{RXTX_REG47, 0xfffc},
		{RXTX_REG48, 0x0},
		{RXTX_REG49, 0x0},
		{RXTX_REG50, 0x0},
		{RXTX_REG51, 0x0},
		{RXTX_REG52, 0x0},
		{RXTX_REG53, 0x0},
		{RXTX_REG54, 0x0},
		{RXTX_REG55, 0x0},
	};

	/* Start SUMMER calibration */
	serdes_setbits(ctx, lane, RXTX_REG127,
		       RXTX_REG127_FORCE_SUM_CAL_START_MASK);
	/*
	 * As per PHY design spec, the Summer calibration requires a minimum
	 * of 100us to complete.
	 */
	usleep_range(100, 500);
	serdes_clrbits(ctx, lane, RXTX_REG127,
		       RXTX_REG127_FORCE_SUM_CAL_START_MASK);
	/*
	 * As per PHY design spec, the auto calibration requires a minimum
	 * of 100us to complete.
	 */
	usleep_range(100, 500);
	/* Start latch calibration */
	serdes_setbits(ctx, lane, RXTX_REG127,
		       RXTX_REG127_FORCE_LAT_CAL_START_MASK);
	/*
	 * As per PHY design spec, the latch calibration requires a minimum
	 * of 100us to complete.
	 */
	usleep_range(100, 500);
	serdes_clrbits(ctx, lane, RXTX_REG127,
		       RXTX_REG127_FORCE_LAT_CAL_START_MASK);
	/* Configure the PHY lane for calibration */
	serdes_wr(ctx, lane, RXTX_REG28, 0x7);
	serdes_wr(ctx, lane, RXTX_REG31, 0x7e00);
	serdes_clrbits(ctx, lane, RXTX_REG4,
		       RXTX_REG4_TX_LOOPBACK_BUF_EN_MASK);
	serdes_clrbits(ctx, lane, RXTX_REG7,
		       RXTX_REG7_LOOP_BACK_ENA_CTLE_MASK);
	for (i = 0; i < ARRAY_SIZE(serdes_reg); i++)
		serdes_wr(ctx, lane, serdes_reg[i].reg,
			  serdes_reg[i].val);
}
  1293. static void xgene_phy_reset_rxd(struct xgene_phy_ctx *ctx, int lane)
  1294. {
  1295. /* Reset digital Rx */
  1296. serdes_clrbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK);
  1297. /* As per PHY design spec, the reset requires a minimum of 100us. */
  1298. usleep_range(100, 150);
  1299. serdes_setbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK);
  1300. }
  1301. static int xgene_phy_get_avg(int accum, int samples)
  1302. {
  1303. return (accum + (samples / 2)) / samples;
  1304. }
  1305. static void xgene_phy_gen_avg_val(struct xgene_phy_ctx *ctx, int lane)
  1306. {
  1307. int max_loop = 10;
  1308. int avg_loop = 0;
  1309. int lat_do = 0, lat_xo = 0, lat_eo = 0, lat_so = 0;
  1310. int lat_de = 0, lat_xe = 0, lat_ee = 0, lat_se = 0;
  1311. int sum_cal = 0;
  1312. int lat_do_itr, lat_xo_itr, lat_eo_itr, lat_so_itr;
  1313. int lat_de_itr, lat_xe_itr, lat_ee_itr, lat_se_itr;
  1314. int sum_cal_itr;
  1315. int fail_even;
  1316. int fail_odd;
  1317. u32 val;
  1318. dev_dbg(ctx->dev, "Generating avg calibration value for lane %d\n",
  1319. lane);
  1320. /* Enable RX Hi-Z termination */
  1321. serdes_setbits(ctx, lane, RXTX_REG12,
  1322. RXTX_REG12_RX_DET_TERM_ENABLE_MASK);
  1323. /* Turn off DFE */
  1324. serdes_wr(ctx, lane, RXTX_REG28, 0x0000);
  1325. /* DFE Presets to zero */
  1326. serdes_wr(ctx, lane, RXTX_REG31, 0x0000);
  1327. /*
  1328. * Receiver Offset Calibration:
  1329. * Calibrate the receiver signal path offset in two steps - summar
  1330. * and latch calibration.
  1331. * Runs the "Receiver Offset Calibration multiple times to determine
  1332. * the average value to use.
  1333. */
  1334. while (avg_loop < max_loop) {
  1335. /* Start the calibration */
  1336. xgene_phy_force_lat_summer_cal(ctx, lane);
  1337. serdes_rd(ctx, lane, RXTX_REG21, &val);
  1338. lat_do_itr = RXTX_REG21_DO_LATCH_CALOUT_RD(val);
  1339. lat_xo_itr = RXTX_REG21_XO_LATCH_CALOUT_RD(val);
  1340. fail_odd = RXTX_REG21_LATCH_CAL_FAIL_ODD_RD(val);
  1341. serdes_rd(ctx, lane, RXTX_REG22, &val);
  1342. lat_eo_itr = RXTX_REG22_EO_LATCH_CALOUT_RD(val);
  1343. lat_so_itr = RXTX_REG22_SO_LATCH_CALOUT_RD(val);
  1344. fail_even = RXTX_REG22_LATCH_CAL_FAIL_EVEN_RD(val);
  1345. serdes_rd(ctx, lane, RXTX_REG23, &val);
  1346. lat_de_itr = RXTX_REG23_DE_LATCH_CALOUT_RD(val);
  1347. lat_xe_itr = RXTX_REG23_XE_LATCH_CALOUT_RD(val);
  1348. serdes_rd(ctx, lane, RXTX_REG24, &val);
  1349. lat_ee_itr = RXTX_REG24_EE_LATCH_CALOUT_RD(val);
  1350. lat_se_itr = RXTX_REG24_SE_LATCH_CALOUT_RD(val);
  1351. serdes_rd(ctx, lane, RXTX_REG121, &val);
  1352. sum_cal_itr = RXTX_REG121_SUMOS_CAL_CODE_RD(val);
  1353. /* Check for failure. If passed, sum them for averaging */
  1354. if ((fail_even == 0 || fail_even == 1) &&
  1355. (fail_odd == 0 || fail_odd == 1)) {
  1356. lat_do += lat_do_itr;
  1357. lat_xo += lat_xo_itr;
  1358. lat_eo += lat_eo_itr;
  1359. lat_so += lat_so_itr;
  1360. lat_de += lat_de_itr;
  1361. lat_xe += lat_xe_itr;
  1362. lat_ee += lat_ee_itr;
  1363. lat_se += lat_se_itr;
  1364. sum_cal += sum_cal_itr;
  1365. dev_dbg(ctx->dev, "Iteration %d:\n", avg_loop);
  1366. dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n",
  1367. lat_do_itr, lat_xo_itr, lat_eo_itr,
  1368. lat_so_itr);
  1369. dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n",
  1370. lat_de_itr, lat_xe_itr, lat_ee_itr,
  1371. lat_se_itr);
  1372. dev_dbg(ctx->dev, "SUM 0x%x\n", sum_cal_itr);
  1373. ++avg_loop;
  1374. } else {
  1375. dev_err(ctx->dev,
  1376. "Receiver calibration failed at %d loop\n",
  1377. avg_loop);
  1378. }
  1379. xgene_phy_reset_rxd(ctx, lane);
  1380. }
  1381. /* Update latch manual calibration with average value */
  1382. serdes_rd(ctx, lane, RXTX_REG127, &val);
  1383. val = RXTX_REG127_DO_LATCH_MANCAL_SET(val,
  1384. xgene_phy_get_avg(lat_do, max_loop));
  1385. val = RXTX_REG127_XO_LATCH_MANCAL_SET(val,
  1386. xgene_phy_get_avg(lat_xo, max_loop));
  1387. serdes_wr(ctx, lane, RXTX_REG127, val);
  1388. serdes_rd(ctx, lane, RXTX_REG128, &val);
  1389. val = RXTX_REG128_EO_LATCH_MANCAL_SET(val,
  1390. xgene_phy_get_avg(lat_eo, max_loop));
  1391. val = RXTX_REG128_SO_LATCH_MANCAL_SET(val,
  1392. xgene_phy_get_avg(lat_so, max_loop));
  1393. serdes_wr(ctx, lane, RXTX_REG128, val);
  1394. serdes_rd(ctx, lane, RXTX_REG129, &val);
  1395. val = RXTX_REG129_DE_LATCH_MANCAL_SET(val,
  1396. xgene_phy_get_avg(lat_de, max_loop));
  1397. val = RXTX_REG129_XE_LATCH_MANCAL_SET(val,
  1398. xgene_phy_get_avg(lat_xe, max_loop));
  1399. serdes_wr(ctx, lane, RXTX_REG129, val);
  1400. serdes_rd(ctx, lane, RXTX_REG130, &val);
  1401. val = RXTX_REG130_EE_LATCH_MANCAL_SET(val,
  1402. xgene_phy_get_avg(lat_ee, max_loop));
  1403. val = RXTX_REG130_SE_LATCH_MANCAL_SET(val,
  1404. xgene_phy_get_avg(lat_se, max_loop));
  1405. serdes_wr(ctx, lane, RXTX_REG130, val);
  1406. /* Update SUMMER calibration with average value */
  1407. serdes_rd(ctx, lane, RXTX_REG14, &val);
  1408. val = RXTX_REG14_CLTE_LATCAL_MAN_PROG_SET(val,
  1409. xgene_phy_get_avg(sum_cal, max_loop));
  1410. serdes_wr(ctx, lane, RXTX_REG14, val);
  1411. dev_dbg(ctx->dev, "Average Value:\n");
  1412. dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n",
  1413. xgene_phy_get_avg(lat_do, max_loop),
  1414. xgene_phy_get_avg(lat_xo, max_loop),
  1415. xgene_phy_get_avg(lat_eo, max_loop),
  1416. xgene_phy_get_avg(lat_so, max_loop));
  1417. dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n",
  1418. xgene_phy_get_avg(lat_de, max_loop),
  1419. xgene_phy_get_avg(lat_xe, max_loop),
  1420. xgene_phy_get_avg(lat_ee, max_loop),
  1421. xgene_phy_get_avg(lat_se, max_loop));
  1422. dev_dbg(ctx->dev, "SUM 0x%x\n",
  1423. xgene_phy_get_avg(sum_cal, max_loop));
  1424. serdes_rd(ctx, lane, RXTX_REG14, &val);
  1425. val = RXTX_REG14_CTLE_LATCAL_MAN_ENA_SET(val, 0x1);
  1426. serdes_wr(ctx, lane, RXTX_REG14, val);
  1427. dev_dbg(ctx->dev, "Enable Manual Summer calibration\n");
  1428. serdes_rd(ctx, lane, RXTX_REG127, &val);
  1429. val = RXTX_REG127_LATCH_MAN_CAL_ENA_SET(val, 0x1);
  1430. dev_dbg(ctx->dev, "Enable Manual Latch calibration\n");
  1431. serdes_wr(ctx, lane, RXTX_REG127, val);
  1432. /* Disable RX Hi-Z termination */
  1433. serdes_rd(ctx, lane, RXTX_REG12, &val);
  1434. val = RXTX_REG12_RX_DET_TERM_ENABLE_SET(val, 0);
  1435. serdes_wr(ctx, lane, RXTX_REG12, val);
  1436. /* Turn on DFE */
  1437. serdes_wr(ctx, lane, RXTX_REG28, 0x0007);
  1438. /* Set DFE preset */
  1439. serdes_wr(ctx, lane, RXTX_REG31, 0x7e00);
  1440. }
  1441. static int xgene_phy_hw_init(struct phy *phy)
  1442. {
  1443. struct xgene_phy_ctx *ctx = phy_get_drvdata(phy);
  1444. int rc;
  1445. int i;
  1446. rc = xgene_phy_hw_initialize(ctx, CLK_EXT_DIFF, SSC_DISABLE);
  1447. if (rc) {
  1448. dev_err(ctx->dev, "PHY initialize failed %d\n", rc);
  1449. return rc;
  1450. }
  1451. /* Setup clock properly after PHY configuration */
  1452. if (!IS_ERR(ctx->clk)) {
  1453. /* HW requires an toggle of the clock */
  1454. clk_prepare_enable(ctx->clk);
  1455. clk_disable_unprepare(ctx->clk);
  1456. clk_prepare_enable(ctx->clk);
  1457. }
  1458. /* Compute average value */
  1459. for (i = 0; i < MAX_LANE; i++)
  1460. xgene_phy_gen_avg_val(ctx, i);
  1461. dev_dbg(ctx->dev, "PHY initialized\n");
  1462. return 0;
  1463. }
/* Generic PHY framework callbacks; only .init is needed by this driver */
static const struct phy_ops xgene_phy_ops = {
	.init = xgene_phy_hw_init,
	.owner = THIS_MODULE,
};
  1468. static struct phy *xgene_phy_xlate(struct device *dev,
  1469. struct of_phandle_args *args)
  1470. {
  1471. struct xgene_phy_ctx *ctx = dev_get_drvdata(dev);
  1472. if (args->args_count <= 0)
  1473. return ERR_PTR(-EINVAL);
  1474. if (args->args[0] < MODE_SATA || args->args[0] >= MODE_MAX)
  1475. return ERR_PTR(-EINVAL);
  1476. ctx->mode = args->args[0];
  1477. return ctx->phy;
  1478. }
  1479. static void xgene_phy_get_param(struct platform_device *pdev,
  1480. const char *name, u32 *buffer,
  1481. int count, u32 *default_val,
  1482. u32 conv_factor)
  1483. {
  1484. int i;
  1485. if (!of_property_read_u32_array(pdev->dev.of_node, name, buffer,
  1486. count)) {
  1487. for (i = 0; i < count; i++)
  1488. buffer[i] /= conv_factor;
  1489. return;
  1490. }
  1491. /* Does not exist, load default */
  1492. for (i = 0; i < count; i++)
  1493. buffer[i] = default_val[i % 3];
  1494. }
  1495. static int xgene_phy_probe(struct platform_device *pdev)
  1496. {
  1497. struct phy_provider *phy_provider;
  1498. struct xgene_phy_ctx *ctx;
  1499. struct resource *res;
  1500. u32 default_spd[] = DEFAULT_SATA_SPD_SEL;
  1501. u32 default_txboost_gain[] = DEFAULT_SATA_TXBOOST_GAIN;
  1502. u32 default_txeye_direction[] = DEFAULT_SATA_TXEYEDIRECTION;
  1503. u32 default_txeye_tuning[] = DEFAULT_SATA_TXEYETUNING;
  1504. u32 default_txamp[] = DEFAULT_SATA_TXAMP;
  1505. u32 default_txcn1[] = DEFAULT_SATA_TXCN1;
  1506. u32 default_txcn2[] = DEFAULT_SATA_TXCN2;
  1507. u32 default_txcp1[] = DEFAULT_SATA_TXCP1;
  1508. int i;
  1509. ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
  1510. if (!ctx)
  1511. return -ENOMEM;
  1512. ctx->dev = &pdev->dev;
  1513. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1514. ctx->sds_base = devm_ioremap_resource(&pdev->dev, res);
  1515. if (IS_ERR(ctx->sds_base))
  1516. return PTR_ERR(ctx->sds_base);
  1517. /* Retrieve optional clock */
  1518. ctx->clk = clk_get(&pdev->dev, NULL);
  1519. /* Load override paramaters */
  1520. xgene_phy_get_param(pdev, "apm,tx-eye-tuning",
  1521. ctx->sata_param.txeyetuning, 6, default_txeye_tuning, 1);
  1522. xgene_phy_get_param(pdev, "apm,tx-eye-direction",
  1523. ctx->sata_param.txeyedirection, 6, default_txeye_direction, 1);
  1524. xgene_phy_get_param(pdev, "apm,tx-boost-gain",
  1525. ctx->sata_param.txboostgain, 6, default_txboost_gain, 1);
  1526. xgene_phy_get_param(pdev, "apm,tx-amplitude",
  1527. ctx->sata_param.txamplitude, 6, default_txamp, 13300);
  1528. xgene_phy_get_param(pdev, "apm,tx-pre-cursor1",
  1529. ctx->sata_param.txprecursor_cn1, 6, default_txcn1, 18200);
  1530. xgene_phy_get_param(pdev, "apm,tx-pre-cursor2",
  1531. ctx->sata_param.txprecursor_cn2, 6, default_txcn2, 18200);
  1532. xgene_phy_get_param(pdev, "apm,tx-post-cursor",
  1533. ctx->sata_param.txpostcursor_cp1, 6, default_txcp1, 18200);
  1534. xgene_phy_get_param(pdev, "apm,tx-speed",
  1535. ctx->sata_param.txspeed, 3, default_spd, 1);
  1536. for (i = 0; i < MAX_LANE; i++)
  1537. ctx->sata_param.speed[i] = 2; /* Default to Gen3 */
  1538. platform_set_drvdata(pdev, ctx);
  1539. ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops);
  1540. if (IS_ERR(ctx->phy)) {
  1541. dev_dbg(&pdev->dev, "Failed to create PHY\n");
  1542. return PTR_ERR(ctx->phy);
  1543. }
  1544. phy_set_drvdata(ctx->phy, ctx);
  1545. phy_provider = devm_of_phy_provider_register(ctx->dev, xgene_phy_xlate);
  1546. return PTR_ERR_OR_ZERO(phy_provider);
  1547. }
/* DT compatibles handled by this driver; exported for module autoloading */
static const struct of_device_id xgene_phy_of_match[] = {
	{.compatible = "apm,xgene-phy",},
	{},
};
MODULE_DEVICE_TABLE(of, xgene_phy_of_match);
/* Platform driver glue; module_platform_driver() generates init/exit */
static struct platform_driver xgene_phy_driver = {
	.probe = xgene_phy_probe,
	.driver = {
		.name = "xgene-phy",
		.of_match_table = xgene_phy_of_match,
	},
};
module_platform_driver(xgene_phy_driver);

MODULE_DESCRIPTION("APM X-Gene Multi-Purpose PHY driver");
MODULE_AUTHOR("Loc Ho <lho@apm.com>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");