driver_pcie2.c

/*
 * Broadcom specific AMBA
 * PCIe Gen 2 Core
 *
 * Copyright 2014, Broadcom Corporation
 * Copyright 2014, Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/bcma/bcma.h>
#include <linux/pci.h>

/**************************************************
 * R/W ops.
 **************************************************/

#if 0
static u32 bcma_core_pcie2_cfg_read(struct bcma_drv_pcie2 *pcie2, u32 addr)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
	pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR);
	return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
}
#endif

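/*
 * Indirect access to the config space: the target offset is latched in the
 * CONFIGINDADDR register, then the value is written through CONFIGINDDATA.
 */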
static void bcma_core_pcie2_cfg_write(struct bcma_drv_pcie2 *pcie2, u32 addr,
				      u32 val)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, val);
}

/**************************************************
 * Init.
 **************************************************/

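/*
 * Workaround toggling the delay-PERST and disable-SPROM-load bits in the
 * clock control register: the defaults (DLYPERST set, DISSPROMLD clear) are
 * restored first and inverted when the workaround is enabled; the final read
 * back flushes the write.
 */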
static u32 bcma_core_pcie2_war_delay_perst_enab(struct bcma_drv_pcie2 *pcie2,
						bool enable)
{
	u32 val;

	/* restore back to default */
	val = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
	val |= PCIE2_CLKC_DLYPERST;
	val &= ~PCIE2_CLKC_DISSPROMLD;
	if (enable) {
		val &= ~PCIE2_CLKC_DLYPERST;
		val |= PCIE2_CLKC_DISSPROMLD;
	}
	pcie2_write32(pcie2, (BCMA_CORE_PCIE2_CLK_CONTROL), val);
	/* flush */
	return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
}

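/*
 * Program the three LTR (Latency Tolerance Reporting) value registers at
 * config offsets 0x844, 0x848 and 0x84C via the indirect config interface.
 */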
static void bcma_core_pcie2_set_ltr_vals(struct bcma_drv_pcie2 *pcie2)
{
	/* LTR0 */
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x844);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x883c883c);
	/* LTR1 */
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x848);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x88648864);
	/* LTR2 */
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x84C);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x90039003);
}

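/*
 * LTR workaround for core revs 2-13 (except rev 10): if LTR is already
 * enabled in the Device Status Control 2 register, reprogram the LTR values,
 * re-enable LTR and cycle the LTR state through active and sleep.
 */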
static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
{
	u8 core_rev = pcie2->core->id.rev;
	u32 devstsctr2;

	if (core_rev < 2 || core_rev == 10 || core_rev > 13)
		return;

	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
		      PCIE2_CAP_DEVSTSCTRL2_OFFSET);
	devstsctr2 = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
	if (devstsctr2 & PCIE2_CAP_DEVSTSCTRL2_LTRENAB) {
		/* force the right LTR values */
		bcma_core_pcie2_set_ltr_vals(pcie2);

		/* TODO:
		si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0); */

		/* enable the LTR */
		devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
			      PCIE2_CAP_DEVSTSCTRL2_OFFSET);
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, devstsctr2);

		/* set the LTR state to be active */
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
			      PCIE2_LTR_ACTIVE);
		usleep_range(1000, 2000);

		/* set the LTR state to be sleep */
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
			      PCIE2_LTR_SLEEP);
		usleep_range(1000, 2000);
	}
}

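/*
 * Per-revision workarounds (WAR160/WAR162); the actual register fixups are
 * still TODO and remain disabled below.
 */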
static void pciedev_crwlpciegen2(struct bcma_drv_pcie2 *pcie2)
{
	u8 core_rev = pcie2->core->id.rev;
	bool pciewar160, pciewar162;

	pciewar160 = core_rev == 7 || core_rev == 9 || core_rev == 11;
	pciewar162 = core_rev == 5 || core_rev == 7 || core_rev == 8 ||
		     core_rev == 9 || core_rev == 11;

	if (!pciewar160 && !pciewar162)
		return;

	/* TODO */
#if 0
	pcie2_set32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL,
		    PCIE_DISABLE_L1CLK_GATING);
#if 0
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
		      PCIEGEN2_COE_PVT_TL_CTRL_0);
	pcie2_mask32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA,
		     ~(1 << COE_PVT_TL_CTRL_0_PM_DIS_L1_REENTRY_BIT));
#endif
#endif
}

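/* Set the low five bits of the PMCR_REFUP config register. */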
static void pciedev_crwlpciegen2_180(struct bcma_drv_pcie2 *pcie2)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_PMCR_REFUP);
	pcie2_set32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x1f);
}

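/* Write bit 0 of the SBMBX config register. */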
static void pciedev_crwlpciegen2_182(struct bcma_drv_pcie2 *pcie2)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_SBMBX);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 1 << 0);
}

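/*
 * For core revs <= 13: derive the PM clock period value from the ALP clock
 * rate (in kHz) and program it into the PVT_REG_PM_CLK_PERIOD config
 * register.
 */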
static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2)
{
	struct bcma_drv_cc *drv_cc = &pcie2->core->bus->drv_cc;
	u8 core_rev = pcie2->core->id.rev;
	u32 alp_khz, pm_value;

	if (core_rev <= 13) {
		alp_khz = bcma_pmu_get_alp_clock(drv_cc) / 1000;
		pm_value = (1000000 * 2) / alp_khz;
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
			      PCIE2_PVT_REG_PM_CLK_PERIOD);
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, pm_value);
	}
}

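/*
 * Main init: apply the SPROM-dependent config fixup, pick the PCIe maximum
 * read request size for the chip and run the per-revision workarounds.
 */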
void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
{
	struct bcma_bus *bus = pcie2->core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 tmp;

	tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54));
	if ((tmp & 0xe) >> 1 == 2)
		bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17);

	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4360:
	case BCMA_CHIP_ID_BCM4352:
		pcie2->reqsize = 1024;
		break;
	default:
		pcie2->reqsize = 128;
		break;
	}

	if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3)
		bcma_core_pcie2_war_delay_perst_enab(pcie2, true);
	bcma_core_pcie2_hw_ltr_war(pcie2);
	pciedev_crwlpciegen2(pcie2);
	pciedev_reg_pm_clk_period(pcie2);
	pciedev_crwlpciegen2_180(pcie2);
	pciedev_crwlpciegen2_182(pcie2);
}

/**************************************************
 * Runtime ops.
 **************************************************/

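/*
 * Runtime op: program the host PCI device with the maximum read request size
 * chosen during init.
 */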
void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2)
{
	struct bcma_bus *bus = pcie2->core->bus;
	struct pci_dev *dev = bus->host_pci;
	int err;

	err = pcie_set_readrq(dev, pcie2->reqsize);
	if (err)
		bcma_err(bus, "Error setting PCI_EXP_DEVCTL_READRQ: %d\n", err);
}