pcie-designware.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787
  1. /*
  2. * Synopsys Designware PCIe host controller driver
  3. *
  4. * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  5. * http://www.samsung.com
  6. *
  7. * Author: Jingoo Han <jg1.han@samsung.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #include <linux/irq.h>
  14. #include <linux/irqdomain.h>
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/msi.h>
  18. #include <linux/of_address.h>
  19. #include <linux/of_pci.h>
  20. #include <linux/pci.h>
  21. #include <linux/pci_regs.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/types.h>
  24. #include "pcie-designware.h"
  25. /* Synopsis specific PCIE configuration registers */
  26. #define PCIE_PORT_LINK_CONTROL 0x710
  27. #define PORT_LINK_MODE_MASK (0x3f << 16)
  28. #define PORT_LINK_MODE_1_LANES (0x1 << 16)
  29. #define PORT_LINK_MODE_2_LANES (0x3 << 16)
  30. #define PORT_LINK_MODE_4_LANES (0x7 << 16)
  31. #define PORT_LINK_MODE_8_LANES (0xf << 16)
  32. #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
  33. #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
  34. #define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
  35. #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
  36. #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
  37. #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
  38. #define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8)
  39. #define PCIE_MSI_ADDR_LO 0x820
  40. #define PCIE_MSI_ADDR_HI 0x824
  41. #define PCIE_MSI_INTR0_ENABLE 0x828
  42. #define PCIE_MSI_INTR0_MASK 0x82C
  43. #define PCIE_MSI_INTR0_STATUS 0x830
  44. #define PCIE_ATU_VIEWPORT 0x900
  45. #define PCIE_ATU_REGION_INBOUND (0x1 << 31)
  46. #define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
  47. #define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
  48. #define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
  49. #define PCIE_ATU_CR1 0x904
  50. #define PCIE_ATU_TYPE_MEM (0x0 << 0)
  51. #define PCIE_ATU_TYPE_IO (0x2 << 0)
  52. #define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
  53. #define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
  54. #define PCIE_ATU_CR2 0x908
  55. #define PCIE_ATU_ENABLE (0x1 << 31)
  56. #define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
  57. #define PCIE_ATU_LOWER_BASE 0x90C
  58. #define PCIE_ATU_UPPER_BASE 0x910
  59. #define PCIE_ATU_LIMIT 0x914
  60. #define PCIE_ATU_LOWER_TARGET 0x918
  61. #define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
  62. #define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
  63. #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
  64. #define PCIE_ATU_UPPER_TARGET 0x91C
  65. static struct pci_ops dw_pcie_ops;
  66. int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
  67. {
  68. if ((uintptr_t)addr & (size - 1)) {
  69. *val = 0;
  70. return PCIBIOS_BAD_REGISTER_NUMBER;
  71. }
  72. if (size == 4)
  73. *val = readl(addr);
  74. else if (size == 2)
  75. *val = readw(addr);
  76. else if (size == 1)
  77. *val = readb(addr);
  78. else {
  79. *val = 0;
  80. return PCIBIOS_BAD_REGISTER_NUMBER;
  81. }
  82. return PCIBIOS_SUCCESSFUL;
  83. }
  84. int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
  85. {
  86. if ((uintptr_t)addr & (size - 1))
  87. return PCIBIOS_BAD_REGISTER_NUMBER;
  88. if (size == 4)
  89. writel(val, addr);
  90. else if (size == 2)
  91. writew(val, addr);
  92. else if (size == 1)
  93. writeb(val, addr);
  94. else
  95. return PCIBIOS_BAD_REGISTER_NUMBER;
  96. return PCIBIOS_SUCCESSFUL;
  97. }
  98. static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
  99. {
  100. if (pp->ops->readl_rc)
  101. pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
  102. else
  103. *val = readl(pp->dbi_base + reg);
  104. }
  105. static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
  106. {
  107. if (pp->ops->writel_rc)
  108. pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
  109. else
  110. writel(val, pp->dbi_base + reg);
  111. }
  112. static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
  113. u32 *val)
  114. {
  115. int ret;
  116. if (pp->ops->rd_own_conf)
  117. ret = pp->ops->rd_own_conf(pp, where, size, val);
  118. else
  119. ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
  120. return ret;
  121. }
  122. static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
  123. u32 val)
  124. {
  125. int ret;
  126. if (pp->ops->wr_own_conf)
  127. ret = pp->ops->wr_own_conf(pp, where, size, val);
  128. else
  129. ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
  130. return ret;
  131. }
  132. static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
  133. int type, u64 cpu_addr, u64 pci_addr, u32 size)
  134. {
  135. dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
  136. PCIE_ATU_VIEWPORT);
  137. dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
  138. dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE);
  139. dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
  140. PCIE_ATU_LIMIT);
  141. dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET);
  142. dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
  143. dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
  144. dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
  145. }
/*
 * irq_chip for the per-vector MSI irqs; all operations mask/unmask via
 * the endpoint's PCI MSI capability.
 */
static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
  153. /* MSI int handler */
  154. irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
  155. {
  156. unsigned long val;
  157. int i, pos, irq;
  158. irqreturn_t ret = IRQ_NONE;
  159. for (i = 0; i < MAX_MSI_CTRLS; i++) {
  160. dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
  161. (u32 *)&val);
  162. if (val) {
  163. ret = IRQ_HANDLED;
  164. pos = 0;
  165. while ((pos = find_next_bit(&val, 32, pos)) != 32) {
  166. irq = irq_find_mapping(pp->irq_domain,
  167. i * 32 + pos);
  168. dw_pcie_wr_own_conf(pp,
  169. PCIE_MSI_INTR0_STATUS + i * 12,
  170. 4, 1 << pos);
  171. generic_handle_irq(irq);
  172. pos++;
  173. }
  174. }
  175. }
  176. return ret;
  177. }
  178. void dw_pcie_msi_init(struct pcie_port *pp)
  179. {
  180. u64 msi_target;
  181. pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
  182. msi_target = virt_to_phys((void *)pp->msi_data);
  183. /* program the msi_data */
  184. dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
  185. (u32)(msi_target & 0xffffffff));
  186. dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
  187. (u32)(msi_target >> 32 & 0xffffffff));
  188. }
  189. static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
  190. {
  191. unsigned int res, bit, val;
  192. res = (irq / 32) * 12;
  193. bit = irq % 32;
  194. dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
  195. val &= ~(1 << bit);
  196. dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
  197. }
/*
 * Undo an MSI vector allocation: detach the msi_desc from each Linux
 * irq, disable the corresponding hardware vector, and return the whole
 * (power-of-two) region starting at @pos to the allocation bitmap.
 */
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}
  212. static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
  213. {
  214. unsigned int res, bit, val;
  215. res = (irq / 32) * 12;
  216. bit = irq % 32;
  217. dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
  218. val |= 1 << bit;
  219. dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
  220. }
/*
 * Allocate a naturally-aligned power-of-two region of @no_irqs MSI
 * vectors for @desc and enable each one in the MSI controller.
 *
 * On success, returns the Linux irq of the first vector and stores the
 * hardware vector index in *pos. Returns -ENOSPC if no free region or
 * irq mapping is available (any partially-enabled vectors are rolled
 * back via clear_irq_range()).
 */
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc);

	/* multi-MSI requires a naturally aligned power-of-two block */
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */
	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			/* roll back the i vectors already set up */
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/*Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	/* log2 vector count, as encoded in the MSI capability */
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}
  257. static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
  258. {
  259. struct msi_msg msg;
  260. u64 msi_target;
  261. if (pp->ops->get_msi_addr)
  262. msi_target = pp->ops->get_msi_addr(pp);
  263. else
  264. msi_target = virt_to_phys((void *)pp->msi_data);
  265. msg.address_lo = (u32)(msi_target & 0xffffffff);
  266. msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
  267. if (pp->ops->get_msi_data)
  268. msg.data = pp->ops->get_msi_data(pp, pos);
  269. else
  270. msg.data = pos;
  271. pci_write_msi_msg(irq, &msg);
  272. }
/*
 * msi_controller ->setup_irq hook: allocate and program a single MSI
 * vector for @desc. MSI-X descriptors are rejected with -EINVAL.
 */
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			    struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X is not supported by this controller */
	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	/* write address/data into the endpoint's MSI capability */
	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}
/*
 * msi_controller ->setup_irqs hook: allocate @nvec multi-MSI vectors
 * for @pdev. Only plain MSI is supported (MSI-X returns -EINVAL); a
 * single msi_desc describing all vectors is expected.
 */
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	/* multi-MSI uses exactly one msi_desc for the whole block */
	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}
/*
 * msi_controller ->teardown_irq hook: disable one vector and return it
 * to the allocation bitmap. data->hwirq is the hardware vector index.
 */
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);

	clear_irq_range(pp, irq, 1, data->hwirq);
}
/* msi_controller handed to the PCI core when CONFIG_PCI_MSI is enabled */
static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};
  319. int dw_pcie_link_up(struct pcie_port *pp)
  320. {
  321. if (pp->ops->link_up)
  322. return pp->ops->link_up(pp);
  323. else
  324. return 0;
  325. }
/*
 * irq_domain ->map callback: attach the MSI irq_chip and the
 * simple-irq flow handler to each newly-mapped vector.
 */
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
/* only ->map is needed; all mappings are created once at host init */
static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};
/*
 * Common host-side bring-up for DesignWare based root complexes:
 * parse DT resources, map DBI and config windows, set up the MSI irq
 * domain, program the RC defaults and scan the root bus.
 *
 * Returns 0 on success or a negative errno.
 */
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct device_node *np = pp->dev->of_node;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct pci_bus *bus, *child;
	struct resource *cfg_res;
	u32 val;
	int i, ret;
	LIST_HEAD(res);
	struct resource_entry *win;

	/*
	 * The "config" reg entry, when present, is split evenly into a
	 * CFG0 half (for the bus directly below the RC) and a CFG1 half
	 * (for buses further downstream).
	 */
	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res)/2;
		pp->cfg1_size = resource_size(cfg_res)/2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		/*
		 * NOTE(review): only logs — execution continues, relying on
		 * a type-0 entry in "ranges" (handled below) to supply the
		 * config window; confirm this fallback is always present.
		 */
		dev_err(pp->dev, "missing *config* reg space\n");
	}

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io = win->res;
			pp->io->name = "I/O";
			pp->io_size = resource_size(pp->io);
			pp->io_bus_addr = pp->io->start - win->offset;
			ret = pci_remap_iospace(pp->io, pp->io_base);
			if (ret) {
				/* non-fatal: proceed without I/O space */
				dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
					 ret, pp->io);
				continue;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			/* type 0 entry: config space described via ranges */
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg)/2;
			pp->cfg1_size = resource_size(pp->cfg)/2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		default:
			continue;
		}
	}

	/* default DBI mapping: the start of the config resource */
	if (!pp->dbi_base) {
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
					    resource_size(pp->cfg));
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(pp->dev, "error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	/* "num-lanes" is optional; 0 makes dw_pcie_setup_rc() reject it */
	ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
	if (ret)
		pp->lanes = 0;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			/* default: linear domain with all vectors pre-mapped */
			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(pp->dev, "irq domain init failed\n");
				return -ENXIO;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				return ret;
		}
	}

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	/*
	 * A platform with its own child-bus accessors manages its windows
	 * itself; otherwise program ATU region 1 as the MEM window.
	 */
	if (!pp->ops->rd_other_conf)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	/* direct the core to (re)train at the configured width/speed */
	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);

	pp->root_bus_nr = pp->busn->start;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
					    &dw_pcie_ops, pp, &res,
					    &dw_pcie_msi_chip);
		dw_pcie_msi_chip.dev = pp->dev;
	} else
		bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
					pp, &res);
	if (!bus)
		return -ENOMEM;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
	/* support old dtbs that incorrectly describe IRQs */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif

	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		pci_bus_size_bridges(bus);
		pci_bus_assign_resources(bus);

		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}

	pci_bus_add_devices(bus);
	return 0;
}
  476. static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
  477. u32 devfn, int where, int size, u32 *val)
  478. {
  479. int ret, type;
  480. u32 busdev, cfg_size;
  481. u64 cpu_addr;
  482. void __iomem *va_cfg_base;
  483. busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
  484. PCIE_ATU_FUNC(PCI_FUNC(devfn));
  485. if (bus->parent->number == pp->root_bus_nr) {
  486. type = PCIE_ATU_TYPE_CFG0;
  487. cpu_addr = pp->cfg0_base;
  488. cfg_size = pp->cfg0_size;
  489. va_cfg_base = pp->va_cfg0_base;
  490. } else {
  491. type = PCIE_ATU_TYPE_CFG1;
  492. cpu_addr = pp->cfg1_base;
  493. cfg_size = pp->cfg1_size;
  494. va_cfg_base = pp->va_cfg1_base;
  495. }
  496. dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
  497. type, cpu_addr,
  498. busdev, cfg_size);
  499. ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
  500. dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
  501. PCIE_ATU_TYPE_IO, pp->io_base,
  502. pp->io_bus_addr, pp->io_size);
  503. return ret;
  504. }
  505. static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
  506. u32 devfn, int where, int size, u32 val)
  507. {
  508. int ret, type;
  509. u32 busdev, cfg_size;
  510. u64 cpu_addr;
  511. void __iomem *va_cfg_base;
  512. busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
  513. PCIE_ATU_FUNC(PCI_FUNC(devfn));
  514. if (bus->parent->number == pp->root_bus_nr) {
  515. type = PCIE_ATU_TYPE_CFG0;
  516. cpu_addr = pp->cfg0_base;
  517. cfg_size = pp->cfg0_size;
  518. va_cfg_base = pp->va_cfg0_base;
  519. } else {
  520. type = PCIE_ATU_TYPE_CFG1;
  521. cpu_addr = pp->cfg1_base;
  522. cfg_size = pp->cfg1_size;
  523. va_cfg_base = pp->va_cfg1_base;
  524. }
  525. dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
  526. type, cpu_addr,
  527. busdev, cfg_size);
  528. ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
  529. dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
  530. PCIE_ATU_TYPE_IO, pp->io_base,
  531. pp->io_bus_addr, pp->io_size);
  532. return ret;
  533. }
  534. static int dw_pcie_valid_config(struct pcie_port *pp,
  535. struct pci_bus *bus, int dev)
  536. {
  537. /* If there is no link, then there is no device */
  538. if (bus->number != pp->root_bus_nr) {
  539. if (!dw_pcie_link_up(pp))
  540. return 0;
  541. }
  542. /* access only one slot on each root port */
  543. if (bus->number == pp->root_bus_nr && dev > 0)
  544. return 0;
  545. /*
  546. * do not read more than one device on the bus directly attached
  547. * to RC's (Virtual Bridge's) DS side.
  548. */
  549. if (bus->primary == pp->root_bus_nr && dev > 0)
  550. return 0;
  551. return 1;
  552. }
  553. static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
  554. int size, u32 *val)
  555. {
  556. struct pcie_port *pp = bus->sysdata;
  557. int ret;
  558. if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
  559. *val = 0xffffffff;
  560. return PCIBIOS_DEVICE_NOT_FOUND;
  561. }
  562. if (bus->number != pp->root_bus_nr)
  563. if (pp->ops->rd_other_conf)
  564. ret = pp->ops->rd_other_conf(pp, bus, devfn,
  565. where, size, val);
  566. else
  567. ret = dw_pcie_rd_other_conf(pp, bus, devfn,
  568. where, size, val);
  569. else
  570. ret = dw_pcie_rd_own_conf(pp, where, size, val);
  571. return ret;
  572. }
  573. static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
  574. int where, int size, u32 val)
  575. {
  576. struct pcie_port *pp = bus->sysdata;
  577. int ret;
  578. if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
  579. return PCIBIOS_DEVICE_NOT_FOUND;
  580. if (bus->number != pp->root_bus_nr)
  581. if (pp->ops->wr_other_conf)
  582. ret = pp->ops->wr_other_conf(pp, bus, devfn,
  583. where, size, val);
  584. else
  585. ret = dw_pcie_wr_other_conf(pp, bus, devfn,
  586. where, size, val);
  587. else
  588. ret = dw_pcie_wr_own_conf(pp, where, size, val);
  589. return ret;
  590. }
/* pci_ops handed to the PCI core for all config accesses on this RC */
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
/*
 * Program the root complex core registers: lane count, link width,
 * RC BARs, interrupt pin, bus numbers, the bridge memory window and
 * the command register. Bails out early (with an error log) if
 * pp->lanes is not 1, 2, 4 or 8.
 */
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	u32 membase;
	u32 memlimit;

	/* set the number of lanes */
	dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
	val &= ~PORT_LINK_MODE_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
		return;
	}
	dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);

	/* set link width speed control register */
	/* (no default case needed: invalid lane counts returned above) */
	dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);

	/* setup RC BARs */
	dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
	dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);

	/* setup interrupt pins: Interrupt Pin byte set to 1 (INTA) */
	dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);

	/* setup bus numbers: primary 0, secondary 1, subordinate 1 */
	dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);

	/* setup memory base, memory limit (1MB-granular bridge window) */
	membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
	memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
	val = memlimit | membase;
	dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);

	/* setup command register: enable I/O, MEM, bus mastering, SERR */
	dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_rc(pp, val, PCI_COMMAND);
}
  664. MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
  665. MODULE_DESCRIPTION("Designware PCIe host controller driver");
  666. MODULE_LICENSE("GPL v2");