/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
#include <asm/pci-bridge.h> /* for struct pci_controller */
#include <asm/pnv-pci.h>
#include <asm/io.h>

#include "cxl.h"
#include <misc/cxl.h>

#define CXL_PCI_VSEC_ID	0x1280
#define CXL_VSEC_MIN_SIZE 0x80
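
/*
 * The VSEC header dword at vsec + 0x4 holds the VSEC ID in bits 15:0 and the
 * VSEC length in bits 31:20; reading the 16-bit word at vsec + 0x6 leaves the
 * length in bits 15:4, hence the shift by 4 below.
 */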
#define CXL_READ_VSEC_LENGTH(dev, vsec, dest)			\
	{							\
		pci_read_config_word(dev, vsec + 0x6, dest);	\
		*dest >>= 4;					\
	}
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x8, dest)

#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x9, dest)
#define CXL_STATUS_SECOND_PORT  0x80
#define CXL_STATUS_MSI_X_FULL   0x40
#define CXL_STATUS_MSI_X_SINGLE 0x20
#define CXL_STATUS_FLASH_RW     0x08
#define CXL_STATUS_FLASH_RO     0x04
#define CXL_STATUS_LOADABLE_AFU 0x02
#define CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
	(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)

#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0xa, val)
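
/*
 * Mode control: the top three bits appear to select the CXL protocol address
 * range the card advertises, and the low bit enables CXL mode; only these
 * bits are touched by switch_card_to_cxl() below.
 */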
#define CXL_VSEC_PROTOCOL_MASK   0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB  0x40
#define CXL_VSEC_PROTOCOL_256TB  0x20 /* Power 8 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01

#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0xc, dest)

#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xf, dest)
#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0x10, dest)

#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0x13, val)
#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */

#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x2c, dest)

/* This works a little differently from the p1/p2 register accesses, to make
 * it easier to pull out individual fields */
#define AFUD_READ(afu, off)	in_be64(afu->afu_desc_mmio + off)
#define AFUD_READ_LE(afu, off)	in_le64(afu->afu_desc_mmio + off)
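
/*
 * Note: PPC_BIT()/PPC_BITMASK() use IBM (big-endian) bit numbering, where
 * bit 0 is the most significant bit of the 64-bit value.
 */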
#define EXTRACT_PPC_BIT(val, bit)	(!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be)	((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))

#define AFUD_READ_INFO(afu)		AFUD_READ(afu, 0x0)
#define   AFUD_NUM_INTS_PER_PROC(val)	EXTRACT_PPC_BITS(val, 0, 15)
#define   AFUD_NUM_PROCS(val)		EXTRACT_PPC_BITS(val, 16, 31)
#define   AFUD_NUM_CRS(val)		EXTRACT_PPC_BITS(val, 32, 47)
#define   AFUD_MULTIMODE(val)		EXTRACT_PPC_BIT(val, 48)
#define   AFUD_PUSH_BLOCK_TRANSFER(val)	EXTRACT_PPC_BIT(val, 55)
#define   AFUD_DEDICATED_PROCESS(val)	EXTRACT_PPC_BIT(val, 59)
#define   AFUD_AFU_DIRECTED(val)	EXTRACT_PPC_BIT(val, 61)
#define   AFUD_TIME_SLICED(val)		EXTRACT_PPC_BIT(val, 63)
#define AFUD_READ_CR(afu)		AFUD_READ(afu, 0x20)
#define   AFUD_CR_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu)		AFUD_READ(afu, 0x28)
#define AFUD_READ_PPPSA(afu)		AFUD_READ(afu, 0x30)
#define   AFUD_PPPSA_PP(val)		EXTRACT_PPC_BIT(val, 6)
#define   AFUD_PPPSA_PSA(val)		EXTRACT_PPC_BIT(val, 7)
#define   AFUD_PPPSA_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu)	AFUD_READ(afu, 0x38)
#define AFUD_READ_EB(afu)		AFUD_READ(afu, 0x40)
#define   AFUD_EB_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu)		AFUD_READ(afu, 0x48)
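
/*
 * The 16- and 8-bit configuration record accessors below are synthesized
 * from an aligned 32-bit read via cxl_afu_cr_read32(); the low bits of the
 * offset select which bytes of the dword to return, matching the
 * little-endian layout of PCI config space.
 */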
u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;

	val = cxl_afu_cr_read32(afu, cr, aligned_off);
	return (val >> ((off & 0x2) * 8)) & 0xffff;
}

u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;

	val = cxl_afu_cr_read32(afu, cr, aligned_off);
	return (val >> ((off & 0x3) * 8)) & 0xff;
}

static const struct pci_device_id cxl_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
	{ PCI_DEVICE_CLASS(0x120000, ~0), },
	{ }
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);

/*
 * Mostly using these wrappers to avoid confusion:
 * priv 1 is BAR2, while priv 2 is BAR0
 */
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}

static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}

static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}

static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}
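
/* Walk the PCIe extended capability list for a Vendor-Specific capability
 * carrying the CXL VSEC ID; returns 0 if the device has none. */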
static int find_cxl_vsec(struct pci_dev *dev)
{
	int vsec = 0;
	u16 val;

	while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(dev, vsec + 0x4, &val);
		if (val == CXL_PCI_VSEC_ID)
			return vsec;
	}
	return 0;
}
static void dump_cxl_config_space(struct pci_dev *dev)
{
	int vsec;
	u32 val;

	dev_info(&dev->dev, "dump_cxl_config_space\n");

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
	dev_info(&dev->dev, "BAR0: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
	dev_info(&dev->dev, "BAR1: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
	dev_info(&dev->dev, "BAR2: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
	dev_info(&dev->dev, "BAR3: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
	dev_info(&dev->dev, "BAR4: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
	dev_info(&dev->dev, "BAR5: %#.8x\n", val);

	dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
		p1_base(dev), p1_size(dev));
	dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
		p2_base(dev), p2_size(dev));
	dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
		pci_resource_start(dev, 4), pci_resource_len(dev, 4));

	if (!(vsec = find_cxl_vsec(dev)))
		return;

#define show_reg(name, what) \
	dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)

	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", (val >> 0) & 0xffff);
	show_reg("Cap Ver", (val >> 16) & 0xf);
	show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x4, &val);
	show_reg("VSEC ID", (val >> 0) & 0xffff);
	show_reg("VSEC Rev", (val >> 16) & 0xf);
	show_reg("VSEC Length", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x8, &val);
	show_reg("Num AFUs", (val >> 0) & 0xff);
	show_reg("Status", (val >> 8) & 0xff);
	show_reg("Mode Control", (val >> 16) & 0xff);
	show_reg("Reserved", (val >> 24) & 0xff);
	pci_read_config_dword(dev, vsec + 0xc, &val);
	show_reg("PSL Rev", (val >> 0) & 0xffff);
	show_reg("CAIA Ver", (val >> 16) & 0xffff);
	pci_read_config_dword(dev, vsec + 0x10, &val);
	show_reg("Base Image Rev", (val >> 0) & 0xffff);
	show_reg("Reserved", (val >> 16) & 0x0fff);
	show_reg("Image Control", (val >> 28) & 0x3);
	show_reg("Reserved", (val >> 30) & 0x1);
	show_reg("Image Loaded", (val >> 31) & 0x1);
	pci_read_config_dword(dev, vsec + 0x14, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x18, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x1c, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x20, &val);
	show_reg("AFU Descriptor Offset", val);
	pci_read_config_dword(dev, vsec + 0x24, &val);
	show_reg("AFU Descriptor Size", val);
	pci_read_config_dword(dev, vsec + 0x28, &val);
	show_reg("Problem State Offset", val);
	pci_read_config_dword(dev, vsec + 0x2c, &val);
	show_reg("Problem State Size", val);
	pci_read_config_dword(dev, vsec + 0x30, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x34, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x38, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x3c, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x40, &val);
	show_reg("PSL Programming Port", val);
	pci_read_config_dword(dev, vsec + 0x44, &val);
	show_reg("PSL Programming Control", val);
	pci_read_config_dword(dev, vsec + 0x48, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x4c, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x50, &val);
	show_reg("Flash Address Register", val);
	pci_read_config_dword(dev, vsec + 0x54, &val);
	show_reg("Flash Size Register", val);
	pci_read_config_dword(dev, vsec + 0x58, &val);
	show_reg("Flash Status/Control Register", val);
	pci_read_config_dword(dev, vsec + 0x5c, &val);
	show_reg("Flash Data Port", val);
#undef show_reg
}
static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
	int i;

#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);
	afu_cr_num = AFUD_NUM_CRS(val);

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	afu_cr_len = AFUD_CR_LEN(val) * 256;

	val = AFUD_READ_CR_OFF(afu);
	afu_cr_off = val;
	show_reg("AFU_CR_offset", val);

	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));

	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));

	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);

	for (i = 0; i < afu_cr_num; i++) {
		val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
		show_reg("CR Vendor", val & 0xffff);
		show_reg("CR Device", (val >> 16) & 0xffff);
	}
#undef show_reg
}
static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	struct device_node *np;
	const __be32 *prop;
	u64 psl_dsnctl;
	u64 chipid;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
		np = of_get_next_parent(np);
	if (!np)
		return -ENODEV;
	chipid = be32_to_cpup(prop);
	of_node_put(np);

	/* Tell PSL where to route data to */
	psl_dsnctl = 0x02E8900002000000ULL | (chipid << (63-5));
	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	/* set fir_accum */
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}

#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
#define _2048_250MHZ_CYCLES 1
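
/*
 * One TBSYNC_CNT unit appears to correspond to 2048 cycles of the 250MHz PSL
 * clock (~8.2us), so the value of 2 written below gives roughly the 16us
 * window that the convergence loop checks for.
 */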
static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_tb;
	int delta;
	unsigned int retry = 0;
	struct device_node *np;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	/* Do not fail when CAPP timebase sync is not supported by OPAL */
	of_node_get(np);
	if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
		of_node_put(np);
		pr_err("PSL: Timebase sync: OPAL support missing\n");
		return 0;
	}
	of_node_put(np);

	/*
	 * Setup PSL Timebase Control and Status register
	 * with the recommended Timebase Sync Count value
	 */
	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
		     TBSYNC_CNT(2 * _2048_250MHZ_CYCLES));

	/* Enable PSL Timebase */
	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);

	/* Wait until CORE TB and PSL TB difference <= 16usecs */
	do {
		msleep(1);
		if (retry++ > 5) {
			pr_err("PSL: Timebase sync: giving up!\n");
			return -EIO;
		}
		psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
		delta = mftb() - psl_tb;
		if (delta < 0)
			delta = -delta;
	} while (tb_to_ns(delta) > 16000);

	return 0;
}
static int init_implementation_afu_regs(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}

int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
		unsigned int virq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}

int cxl_update_image_control(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int vsec;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
		return rc;
	}

	if (adapter->perst_loads_image)
		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
	else
		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;

	if (adapter->perst_select_user)
		image_state |= CXL_VSEC_PERST_SELECT_USER;
	else
		image_state &= ~CXL_VSEC_PERST_SELECT_USER;

	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
		return rc;
	}

	return 0;
}

int cxl_alloc_one_irq(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirqs(dev, 1);
}

void cxl_release_one_irq(struct cxl *adapter, int hwirq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_release_hwirqs(dev, hwirq, 1);
}

int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}

void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirq_ranges(irqs, dev);
}
static int setup_cxl_bars(struct pci_dev *dev)
{
	/* Safety check in case we get backported to < 3.17 without M64 */
	if ((p1_base(dev) < 0x100000000ULL) ||
	    (p2_base(dev) < 0x100000000ULL)) {
		dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
		return -ENODEV;
	}

	/*
	 * BAR 4/5 has a special meaning for CXL and must be programmed with a
	 * special value corresponding to the CXL protocol address range.
	 * For POWER 8 that means bits 48:49 must be set to 10
	 */
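	/* BAR4 holds the lower dword; 0x00020000 in BAR5 (the upper dword)
	 * sets address bit 49 and leaves bit 48 clear, giving that pattern. */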
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);

	return 0;
}

/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
static int switch_card_to_cxl(struct pci_dev *dev)
{
	int vsec;
	u8 val;
	int rc;

	dev_info(&dev->dev, "switch card to CXL\n");

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
		dev_err(&dev->dev, "failed to read current mode control: %i\n", rc);
		return rc;
	}
	val &= ~CXL_VSEC_PROTOCOL_MASK;
	val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
	if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
		dev_err(&dev->dev, "failed to enable CXL protocol: %i\n", rc);
		return rc;
	}
	/*
	 * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
	 * we must wait 100ms after this mode switch before touching
	 * PCIe config space.
	 */
	msleep(100);

	return 0;
}
static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	u64 p1n_base, p2n_base, afu_desc;
	const u64 p1n_size = 0x100;
	const u64 p2n_size = 0x1000;

	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
	afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size));
	afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size);

	if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size)))
		goto err;
	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
		goto err1;
	if (afu_desc) {
		if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size)))
			goto err2;
	}

	return 0;
err2:
	iounmap(afu->p2n_mmio);
err1:
	iounmap(afu->p1n_mmio);
err:
	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
	return -ENOMEM;
}

static void cxl_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio) {
		iounmap(afu->p2n_mmio);
		afu->p2n_mmio = NULL;
	}
	if (afu->p1n_mmio) {
		iounmap(afu->p1n_mmio);
		afu->p1n_mmio = NULL;
	}
	if (afu->afu_desc_mmio) {
		iounmap(afu->afu_desc_mmio);
		afu->afu_desc_mmio = NULL;
	}
}
static void cxl_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("cxl_release_afu\n");

	idr_destroy(&afu->contexts_idr);
	cxl_release_spa(afu);

	kfree(afu);
}

static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
{
	struct cxl_afu *afu;

	if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
		return NULL;

	afu->adapter = adapter;
	afu->dev.parent = &adapter->dev;
	afu->dev.release = cxl_release_afu;
	afu->slice = slice;
	idr_init(&afu->contexts_idr);
	mutex_init(&afu->contexts_lock);
	spin_lock_init(&afu->afu_cntl_lock);
	mutex_init(&afu->spa_mutex);

	afu->prefault_mode = CXL_PREFAULT_NONE;
	afu->irqs_max = afu->adapter->user_irqs;

	return afu;
}

/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

	val = AFUD_READ_INFO(afu);
	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
	afu->crs_num = AFUD_NUM_CRS(val);

	if (AFUD_AFU_DIRECTED(val))
		afu->modes_supported |= CXL_MODE_DIRECTED;
	if (AFUD_DEDICATED_PROCESS(val))
		afu->modes_supported |= CXL_MODE_DEDICATED;
	if (AFUD_TIME_SLICED(val))
		afu->modes_supported |= CXL_MODE_TIME_SLICED;

	val = AFUD_READ_PPPSA(afu);
	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
	afu->psa = AFUD_PPPSA_PSA(val);
	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
		afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);

	val = AFUD_READ_CR(afu);
	afu->crs_len = AFUD_CR_LEN(val) * 256;
	afu->crs_offset = AFUD_READ_CR_OFF(afu);

	/* eb_len is in multiple of 4K */
	afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
	afu->eb_offset = AFUD_READ_EB_OFF(afu);

	/* eb_off is 4K aligned so lower 12 bits are always zero */
	if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
		dev_warn(&afu->dev,
			 "Invalid AFU error buffer offset %Lx\n",
			 afu->eb_offset);
		dev_info(&afu->dev,
			 "Ignoring AFU error buffer in the descriptor\n");
		/* indicate that no afu buffer exists */
		afu->eb_len = 0;
	}

	return 0;
}
static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
	int i;

	if (afu->psa && afu->adapter->ps_size <
			(afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
		return -ENODEV;
	}

	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
		dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!\n");

	for (i = 0; i < afu->crs_num; i++) {
		if ((cxl_afu_cr_read32(afu, i, 0) == 0)) {
			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
			return -EINVAL;
		}
	}

	return 0;
}
static int sanitise_afu_regs(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgement to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (__cxl_afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}

	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);

	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}

	reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (reg) {
		if (reg & ~0xffff)
			dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
		cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
	}

	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE

/*
 * afu_eb_read:
 * Called from sysfs and reads the afu error info buffer. The h/w only supports
 * 4/8-byte aligned access, so in case the requested offset/count aren't 8-byte
 * aligned the function uses a bounce buffer of at most PAGE_SIZE.
 */
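/*
 * For example, a 5-byte read at offset 3 is turned into an aligned read of
 * the 8-byte window [0, 8); the requested bytes are then copied out of the
 * bounce buffer starting at offset 3.
 */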
ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_TEMPORARY);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}
static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
		return rc;

	if ((rc = sanitise_afu_regs(afu)))
		goto err1;

	/* We need to reset the AFU before we can read the AFU descriptor */
	if ((rc = __cxl_afu_reset(afu)))
		goto err1;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err1;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err1;

	if ((rc = init_implementation_afu_regs(afu)))
		goto err1;

	if ((rc = cxl_register_serr_irq(afu)))
		goto err1;

	if ((rc = cxl_register_psl_irq(afu)))
		goto err2;

	return 0;

err2:
	cxl_release_serr_irq(afu);
err1:
	cxl_unmap_slice_regs(afu);
	return rc;
}

static void cxl_deconfigure_afu(struct cxl_afu *afu)
{
	cxl_release_psl_irq(afu);
	cxl_release_serr_irq(afu);
	cxl_unmap_slice_regs(afu);
}

static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	int rc;

	afu = cxl_alloc_afu(adapter, slice);
	if (!afu)
		return -ENOMEM;

	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
	if (rc)
		goto err_free;

	rc = cxl_configure_afu(afu, adapter, dev);
	if (rc)
		goto err_free;

	/* Don't care if this fails */
	cxl_debugfs_afu_add(afu);

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	adapter->afu[afu->slice] = afu;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put1:
	cxl_deconfigure_afu(afu);
	cxl_debugfs_afu_remove(afu);
	device_unregister(&afu->dev);
	return rc;

err_free:
	kfree(afu);
	return rc;
}
static void cxl_remove_afu(struct cxl_afu *afu)
{
	pr_devel("cxl_remove_afu\n");

	if (!afu)
		return;

	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_afu_deactivate_mode(afu);

	cxl_deconfigure_afu(afu);
	device_unregister(&afu->dev);
}

int cxl_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;

	if (adapter->perst_same_image) {
		dev_warn(&dev->dev,
			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
		return -EINVAL;
	}

	dev_info(&dev->dev, "CXL reset\n");

	/* pcie_warm_reset requests a fundamental pci reset which includes a
	 * PERST assert/deassert. PERST triggers a loading of the image
	 * if "user" or "factory" is selected in sysfs */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	return rc;
}
static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	if (pci_request_region(dev, 2, "priv 2 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 1 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->p1_mmio);
	adapter->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}

static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
	if (adapter->p1_mmio) {
		iounmap(adapter->p1_mmio);
		adapter->p1_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
	}
	if (adapter->p2_mmio) {
		iounmap(adapter->p2_mmio);
		adapter->p2_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
	}
}
static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u16 vseclen;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* Convert everything to bytes, because there is NO WAY I'd look at the
	 * code a month later and forget what units these are in ;-) */
	adapter->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->afu_desc_size = afu_desc_size * 64 * 1024;

	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}
/*
 * Workaround a PCIe Host Bridge defect on some cards, that can cause
 * malformed Transaction Layer Packet (TLP) errors to be erroneously
 * reported. Mask this error in the Uncorrectable Error Mask Register.
 *
 * The upper nibble of the PSL revision is used to distinguish between
 * different cards. The affected ones have it set to 0.
 */
static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
{
	int aer;
	u32 data;

	if (adapter->psl_rev & 0xf000)
		return;
	if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
		return;
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
	if ((data & PCI_ERR_UNC_MALF_TLP) &&
	    (data & PCI_ERR_UNC_INTN))
		return;
	data |= PCI_ERR_UNC_MALF_TLP;
	data |= PCI_ERR_UNC_INTN;
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
}
static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
		return -EBUSY;

	if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
		dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
		return -EINVAL;
	}

	if (!adapter->slices) {
		/* Once we support dynamic reprogramming we can use the card if
		 * it supports loadable AFUs */
		dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
		return -EINVAL;
	}

	if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
		return -EINVAL;
	}

	if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
				   "available in BAR2: 0x%llx > 0x%llx\n",
			adapter->ps_size, p2_size(dev) - adapter->ps_off);
		return -EINVAL;
	}

	return 0;
}
static void cxl_release_adapter(struct device *dev)
{
	struct cxl *adapter = to_cxl_adapter(dev);

	pr_devel("cxl_release_adapter\n");

	cxl_remove_adapter_nr(adapter);

	kfree(adapter);
}

static struct cxl *cxl_alloc_adapter(void)
{
	struct cxl *adapter;

	if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
		return NULL;

	spin_lock_init(&adapter->afu_list_lock);

	if (cxl_alloc_adapter_nr(adapter))
		goto err1;

	if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
		goto err2;

	return adapter;

err2:
	cxl_remove_adapter_nr(adapter);
err1:
	kfree(adapter);
	return NULL;
}

#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))

static int sanitise_adapter_regs(struct cxl *adapter)
{
	/* Clear PSL tberror bit by writing 1 to it */
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
	return cxl_tlb_slb_invalidate(adapter);
}

/* This should contain *only* operations that can safely be done in
 * both creation and recovery.
 */
static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	adapter->dev.parent = &dev->dev;
	adapter->dev.release = cxl_release_adapter;
	pci_set_drvdata(dev, adapter);

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
		return rc;
	}

	if ((rc = cxl_read_vsec(adapter, dev)))
		return rc;

	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		return rc;

	cxl_fixup_malformed_tlp(adapter, dev);

	if ((rc = setup_cxl_bars(dev)))
		return rc;

	if ((rc = switch_card_to_cxl(dev)))
		return rc;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		return rc;

	if ((rc = sanitise_adapter_regs(adapter)))
		goto err;

	if ((rc = init_implementation_adapter_regs(adapter, dev)))
		goto err;

	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
		goto err;

	/* If recovery happened, the last step is to turn on snooping.
	 * In the non-recovery case this has no effect */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
		goto err;

	if ((rc = cxl_setup_psl_timebase(adapter, dev)))
		goto err;

	if ((rc = cxl_register_psl_err_irq(adapter)))
		goto err;

	return 0;

err:
	cxl_unmap_adapter_regs(adapter);
	return rc;
}

static void cxl_deconfigure_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	cxl_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);

	pci_disable_device(pdev);
}
static struct cxl *cxl_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	int rc;

	adapter = cxl_alloc_adapter();
	if (!adapter)
		return ERR_PTR(-ENOMEM);

	/* Set defaults for parameters which need to persist over
	 * configure/reconfigure
	 */
	adapter->perst_loads_image = true;
	adapter->perst_same_image = false;

	rc = cxl_configure_adapter(adapter, dev);
	if (rc) {
		pci_disable_device(dev);
		cxl_release_adapter(&adapter->dev);
		return ERR_PTR(rc);
	}

	/* Don't care if this one fails: */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	/* This should mirror cxl_remove_adapter, except without the
	 * sysfs parts
	 */
	cxl_debugfs_adapter_remove(adapter);
	cxl_deconfigure_adapter(adapter);
	device_unregister(&adapter->dev);
	return ERR_PTR(rc);
}

static void cxl_remove_adapter(struct cxl *adapter)
{
	pr_devel("cxl_remove_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);

	cxl_deconfigure_adapter(adapter);

	device_unregister(&adapter->dev);
}
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct cxl *adapter;
	int slice;
	int rc;

	if (cxl_verbose)
		dump_cxl_config_space(dev);

	adapter = cxl_init_adapter(dev);
	if (IS_ERR(adapter)) {
		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
		return PTR_ERR(adapter);
	}

	for (slice = 0; slice < adapter->slices; slice++) {
		if ((rc = cxl_init_afu(adapter, slice, dev))) {
			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
			continue;
		}

		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
		if (rc)
			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
	}

	return 0;
}

static void cxl_remove(struct pci_dev *dev)
{
	struct cxl *adapter = pci_get_drvdata(dev);
	struct cxl_afu *afu;
	int i;

	/*
	 * Lock to prevent someone grabbing a ref through the adapter list as
	 * we are removing it
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];
		cxl_pci_vphb_remove(afu);
		cxl_remove_afu(afu);
	}
	cxl_remove_adapter(adapter);
}
static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
						pci_channel_state_t state)
{
	struct pci_dev *afu_dev;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;

	/* There should only be one entry, but go through the list
	 * anyway
	 */
	if (afu->phb == NULL)
		return result;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		afu_dev->error_state = state;

		if (afu_dev->driver->err_handler)
			afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
										  state);
		/* Disconnect trumps all, NONE trumps NEED_RESET */
		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
			result = PCI_ERS_RESULT_DISCONNECT;
		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
			 (result == PCI_ERS_RESULT_NEED_RESET))
			result = PCI_ERS_RESULT_NONE;
	}
	return result;
}
static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	int i;

	/* At this point, we could still have an interrupt pending.
	 * Let's try to get them out of the way before they do
	 * anything we don't like.
	 */
	schedule();

	/* If we're permanently dead, give up. */
	if (state == pci_channel_io_perm_failure) {
		for (i = 0; i < adapter->slices; i++) {
			afu = adapter->afu[i];
			/*
			 * Tell the AFU drivers; but we don't care what they
			 * say, we're going away.
			 */
			cxl_vphb_error_detected(afu, state);
		}
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Are we reflashing?
	 *
	 * If we reflash, we could come back as something entirely
	 * different, including a non-CAPI card. As such, by default
	 * we don't participate in the process. We'll be unbound and
	 * the slot re-probed. (TODO: check EEH doesn't blindly rebind
	 * us!)
	 *
	 * However, this isn't the entire story: for reliability
	 * reasons, we usually want to reflash the FPGA on PERST in
	 * order to get back to a more reliable known-good state.
	 *
	 * This causes us a bit of a problem: if we reflash we can't
	 * trust that we'll come back the same - we could have a new
	 * image and been PERSTed in order to load that
	 * image. However, most of the time we actually *will* come
	 * back the same - for example a regular EEH event.
	 *
	 * Therefore, we allow the user to assert that the image is
	 * indeed the same and that we should continue on into EEH
	 * anyway.
	 */
	if (adapter->perst_loads_image && !adapter->perst_same_image) {
		/* TODO take the PHB out of CXL mode */
		dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
		return PCI_ERS_RESULT_NONE;
	}

	/*
	 * At this point, we want to try to recover. We'll always
	 * need a complete slot reset: we don't trust any other reset.
	 *
	 * Now, we go through each AFU:
	 *  - We send the driver, if bound, an error_detected callback.
	 *    We expect it to clean up, but it can also tell us to give
	 *    up and permanently detach the card. To simplify things, if
	 *    any bound AFU driver doesn't support EEH, we give up on EEH.
	 *
	 *  - We detach all contexts associated with the AFU. This
	 *    does not free them, but puts them into a CLOSED state
	 *    which causes the associated files to return useful
	 *    errors to userland. It also unmaps, but does not free,
	 *    any IRQs.
	 *
	 *  - We clean up our side: releasing and unmapping resources we hold
	 *    so we can wire them up again when the hardware comes back up.
	 *
	 * Driver authors should note:
	 *
	 *  - Any contexts you create in your kernel driver (except
	 *    those associated with anonymous file descriptors) are
	 *    your responsibility to free and recreate. Likewise with
	 *    any attached resources.
	 *
	 *  - We will take responsibility for re-initialising the
	 *    device context (the one set up for you in
	 *    cxl_pci_enable_device_hook and accessed through
	 *    cxl_get_context). If you've attached IRQs or other
	 *    resources to it, they remain yours to free.
	 *
	 * You can call the same functions to release resources as you
	 * normally would: we make sure that these functions continue
	 * to work when the hardware is down.
	 *
	 * Two examples:
	 *
	 * 1) If you normally free all your resources at the end of
	 *    each request, or if you use anonymous FDs, your
	 *    error_detected callback can simply set a flag to tell
	 *    your driver not to start any new calls. You can then
	 *    clear the flag in the resume callback.
	 *
	 * 2) If you normally allocate your resources on startup:
	 *     * Set a flag in error_detected as above.
	 *     * Let CXL detach your contexts.
	 *     * In slot_reset, free the old resources and allocate new ones.
	 *     * In resume, clear the flag to allow things to start.
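	 *
	 * A minimal sketch of example 1, using hypothetical driver
	 * fields and callbacks (not part of this file):
	 *
	 *	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
	 *						  pci_channel_state_t state)
	 *	{
	 *		my_drv->frozen = true;	// stop issuing new work
	 *		return PCI_ERS_RESULT_NEED_RESET;
	 *	}
	 *
	 *	static void my_resume(struct pci_dev *pdev)
	 *	{
	 *		my_drv->frozen = false;	// new calls may start again
	 *	}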
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		result = cxl_vphb_error_detected(afu, state);

		/* Only continue if everyone agrees on NEED_RESET */
		if (result != PCI_ERS_RESULT_NEED_RESET)
			return result;

		cxl_context_detach_all(afu);
		cxl_afu_deactivate_mode(afu);
		cxl_deconfigure_afu(afu);
	}
	cxl_deconfigure_adapter(adapter);

	return result;
}
static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	struct pci_dev *afu_dev;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int i;

	if (cxl_configure_adapter(adapter, pdev))
		goto err;

	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (cxl_configure_afu(afu, adapter, pdev))
			goto err;

		if (cxl_afu_select_best_mode(afu))
			goto err;

		if (afu->phb == NULL)
			continue;

		cxl_pci_vphb_reconfigure(afu);

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			/* Reset the device context.
			 * TODO: make this less disruptive
			 */
			ctx = cxl_get_context(afu_dev);

			if (ctx && cxl_release_context(ctx))
				goto err;

			ctx = cxl_dev_context_init(afu_dev);
			if (!ctx)
				goto err;

			afu_dev->dev.archdata.cxl_ctx = ctx;

			if (cxl_afu_check_and_enable(afu))
				goto err;

			afu_dev->error_state = pci_channel_io_normal;

			/* If there's a driver attached, allow it to
			 * chime in on recovery. Drivers should check
			 * if everything has come back OK, but
			 * shouldn't start new work until we call
			 * their resume function.
			 */
			if (!afu_dev->driver)
				continue;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);

			if (afu_result == PCI_ERS_RESULT_DISCONNECT)
				result = PCI_ERS_RESULT_DISCONNECT;
		}
	}
	return result;

err:
	/* All the bits that happen in both error_detected and cxl_remove
	 * should be idempotent, so we don't need to worry about leaving a mix
	 * of unconfigured and reconfigured resources.
	 */
	dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
	return PCI_ERS_RESULT_DISCONNECT;
}
static void cxl_pci_resume(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct pci_dev *afu_dev;
	int i;

	/* Everything is back now. Drivers should restart work now.
	 * This is not the place to be checking if everything came back up
	 * properly, because there's no return value: do that in slot_reset.
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (afu->phb == NULL)
			continue;

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			if (afu_dev->driver && afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
		}
	}
}

static const struct pci_error_handlers cxl_err_handler = {
	.error_detected = cxl_pci_error_detected,
	.slot_reset = cxl_pci_slot_reset,
	.resume = cxl_pci_resume,
};

struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
	.shutdown = cxl_remove,
	.err_handler = &cxl_err_handler,
};