/* vphb.c — CXL virtual PHB (PCI host bridge) support */
  1. /*
  2. * Copyright 2014 IBM Corp.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
  9. #include <linux/pci.h>
  10. #include <misc/cxl.h>
  11. #include "cxl.h"
  12. static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
  13. {
  14. if (dma_mask < DMA_BIT_MASK(64)) {
  15. pr_info("%s only 64bit DMA supported on CXL", __func__);
  16. return -EIO;
  17. }
  18. *(pdev->dev.dma_mask) = dma_mask;
  19. return 0;
  20. }
/* Devices behind a CXL virtual PHB are probed like any normal PCI bus. */
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}
/* MSI/MSI-X is not supported on the CXL virtual PHB. */
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	return -ENODEV;
}
static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set up (cxl_setup_msi_irqs() always fails),
	 * but we still need to provide this callback.
	 */
}
/*
 * Called when a device on the virtual PHB is enabled.  Refuses enablement
 * while the adapter link is down, wires up direct DMA ops, and allocates a
 * default CXL context for the device.  Returns true only if the AFU could
 * also be checked and enabled.
 */
static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	struct cxl_context *ctx;

	/* The vPHB stashes its owning AFU in private_data (see cxl_pci_vphb_add) */
	phb = pci_bus_to_host(dev->bus);
	afu = (struct cxl_afu *)phb->private_data;

	if (!cxl_adapter_link_ok(afu->adapter)) {
		dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
		return false;
	}

	set_dma_ops(&dev->dev, &dma_direct_ops);
	set_dma_offset(&dev->dev, PAGE_OFFSET);

	/*
	 * Allocate a context to do cxl things too. If we eventually do real
	 * DMA ops, we'll need a default context to attach them to
	 */
	ctx = cxl_dev_context_init(dev);
	if (!ctx)
		return false;
	dev->dev.archdata.cxl_ctx = ctx;

	return (cxl_afu_check_and_enable(afu) == 0);
}
  59. static void cxl_pci_disable_device(struct pci_dev *dev)
  60. {
  61. struct cxl_context *ctx = cxl_get_context(dev);
  62. if (ctx) {
  63. if (ctx->status == STARTED) {
  64. dev_err(&dev->dev, "Default context started\n");
  65. return;
  66. }
  67. dev->dev.archdata.cxl_ctx = NULL;
  68. cxl_release_context(ctx);
  69. }
  70. }
/* No special MMIO window alignment requirements on the virtual PHB. */
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	return 1;
}
/* Intentionally a no-op for now. */
static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here ? */
}
  80. static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
  81. {
  82. return (bus << 8) + devfn;
  83. }
/*
 * Compute the MMIO address of a config-space register for the given
 * (bus, devfn, offset) on this virtual PHB.
 *
 * NOTE: phb->cfg_data does not hold a pointer here — cxl_pci_vphb_add()
 * stores the per-record config space size (afu->crs_len) in it, so
 * record * cfg_data is the byte offset of this function's config record
 * within the area starting at phb->cfg_addr.
 */
static unsigned long cxl_pcie_cfg_addr(struct pci_controller* phb,
				       u8 bus, u8 devfn, int offset)
{
	int record = cxl_pcie_cfg_record(bus, devfn);

	return (unsigned long)phb->cfg_addr + ((unsigned long)phb->cfg_data * record) + offset;
}
/*
 * Validate a config-space access and compute everything the accessors
 * need: the 32-bit-aligned MMIO address, the access-width mask, and the
 * bit shift of the target bytes within that aligned word.
 *
 * Returns 0 on success or a PCIBIOS_* error code.
 */
static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
				int offset, int len,
				volatile void __iomem **ioaddr,
				u32 *mask, int *shift)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	unsigned long addr;

	phb = pci_bus_to_host(bus);
	if (phb == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	afu = (struct cxl_afu *)phb->private_data;

	/* Reject accesses beyond the AFU's configuration records */
	if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/* cfg_data carries the per-record size, so it bounds the offset */
	if (offset >= (unsigned long)phb->cfg_data)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	addr = cxl_pcie_cfg_addr(phb, bus->number, devfn, offset);

	/* Align down to 32 bits; shift selects the byte(s) within the word */
	/* NOTE(review): the (void *) cast drops the __iomem annotation — sparse
	 * would flag this; confirm it is intentional. */
	*ioaddr = (void *)(addr & ~0x3ULL);
	*shift = ((addr & 0x3) * 8);
	switch (len) {
	case 1:
		*mask = 0xff;
		break;
	case 2:
		*mask = 0xffff;
		break;
	default:
		*mask = 0xffffffff;
		break;
	}
	return 0;
}
  122. static inline bool cxl_config_link_ok(struct pci_bus *bus)
  123. {
  124. struct pci_controller *phb;
  125. struct cxl_afu *afu;
  126. /* Config space IO is based on phb->cfg_addr, which is based on
  127. * afu_desc_mmio. This isn't safe to read/write when the link
  128. * goes down, as EEH tears down MMIO space.
  129. *
  130. * Check if the link is OK before proceeding.
  131. */
  132. phb = pci_bus_to_host(bus);
  133. if (phb == NULL)
  134. return false;
  135. afu = (struct cxl_afu *)phb->private_data;
  136. return cxl_adapter_link_ok(afu->adapter);
  137. }
  138. static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
  139. int offset, int len, u32 *val)
  140. {
  141. volatile void __iomem *ioaddr;
  142. int shift, rc;
  143. u32 mask;
  144. rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
  145. &mask, &shift);
  146. if (rc)
  147. return rc;
  148. if (!cxl_config_link_ok(bus))
  149. return PCIBIOS_DEVICE_NOT_FOUND;
  150. /* Can only read 32 bits */
  151. *val = (in_le32(ioaddr) >> shift) & mask;
  152. return PCIBIOS_SUCCESSFUL;
  153. }
/*
 * pci_ops .write callback for the virtual PHB.  The hardware only supports
 * 32-bit accesses, so narrower writes are done as a read-modify-write of
 * the containing aligned word.
 */
static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	volatile void __iomem *ioaddr;
	u32 v, mask;
	int shift, rc;

	rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
				  &mask, &shift);
	if (rc)
		return rc;

	if (!cxl_config_link_ok(bus))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Can only write 32 bits so do read-modify-write */
	mask <<= shift;
	val <<= shift;
	v = (in_le32(ioaddr) & ~mask) | (val & mask);
	out_le32(ioaddr, v);
	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors installed on the virtual PHB. */
static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};
/*
 * Controller callbacks for the virtual PHB.  Note that disable_device
 * and release_device deliberately share one handler: both tear down the
 * default CXL context if it hasn't been started.
 */
static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.window_alignment = cxl_pci_window_alignment,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
	.dma_set_mask = cxl_dma_set_mask,
};
  190. int cxl_pci_vphb_add(struct cxl_afu *afu)
  191. {
  192. struct pci_dev *phys_dev;
  193. struct pci_controller *phb, *phys_phb;
  194. phys_dev = to_pci_dev(afu->adapter->dev.parent);
  195. phys_phb = pci_bus_to_host(phys_dev->bus);
  196. /* Alloc and setup PHB data structure */
  197. phb = pcibios_alloc_controller(phys_phb->dn);
  198. if (!phb)
  199. return -ENODEV;
  200. /* Setup parent in sysfs */
  201. phb->parent = &phys_dev->dev;
  202. /* Setup the PHB using arch provided callback */
  203. phb->ops = &cxl_pcie_pci_ops;
  204. phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
  205. phb->cfg_data = (void *)(u64)afu->crs_len;
  206. phb->private_data = afu;
  207. phb->controller_ops = cxl_pci_controller_ops;
  208. /* Scan the bus */
  209. pcibios_scan_phb(phb);
  210. if (phb->bus == NULL)
  211. return -ENXIO;
  212. /* Claim resources. This might need some rework as well depending
  213. * whether we are doing probe-only or not, like assigning unassigned
  214. * resources etc...
  215. */
  216. pcibios_claim_one_bus(phb->bus);
  217. /* Add probed PCI devices to the device model */
  218. pci_bus_add_devices(phb->bus);
  219. afu->phb = phb;
  220. return 0;
  221. }
  222. void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
  223. {
  224. /* When we are reconfigured, the AFU's MMIO space is unmapped
  225. * and remapped. We need to reflect this in the PHB's view of
  226. * the world.
  227. */
  228. afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
  229. }
  230. void cxl_pci_vphb_remove(struct cxl_afu *afu)
  231. {
  232. struct pci_controller *phb;
  233. /* If there is no configuration record we won't have one of these */
  234. if (!afu || !afu->phb)
  235. return;
  236. phb = afu->phb;
  237. afu->phb = NULL;
  238. pci_remove_root_bus(phb->bus);
  239. pcibios_free_controller(phb);
  240. }
  241. struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
  242. {
  243. struct pci_controller *phb;
  244. phb = pci_bus_to_host(dev->bus);
  245. return (struct cxl_afu *)phb->private_data;
  246. }
  247. EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
  248. unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
  249. {
  250. return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
  251. }
  252. EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);