intel_guc_loader.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608
  1. /*
  2. * Copyright © 2014 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Vinit Azad <vinit.azad@intel.com>
  25. * Ben Widawsky <ben@bwidawsk.net>
  26. * Dave Gordon <david.s.gordon@intel.com>
  27. * Alex Dai <yu.dai@intel.com>
  28. */
  29. #include <linux/firmware.h>
  30. #include "i915_drv.h"
  31. #include "intel_guc.h"
  32. /**
  33. * DOC: GuC
  34. *
  35. * intel_guc:
  36. * Top level structure of guc. It handles firmware loading and manages client
  37. * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
  38. * ExecList submission.
  39. *
  40. * Firmware versioning:
  41. * The firmware build process will generate a version header file with major and
  42. * minor version defined. The versions are built into CSS header of firmware.
* i915 kernel driver sets the minimal firmware version required per platform.
  44. * The firmware installation package will install (symbolic link) proper version
  45. * of firmware.
  46. *
  47. * GuC address space:
  48. * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
  49. * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
  50. * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
* used by GuC are pinned with PIN_OFFSET_BIAS along with size of WOPCM.
  52. *
  53. * Firmware log:
  54. * Firmware log is enabled by setting i915.guc_log_level to non-negative level.
  55. * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
  56. * i915_guc_load_status will print out firmware loading status and scratch
  57. * registers value.
  58. *
  59. */
  60. #define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
  61. MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
  62. /* User-friendly representation of an enum */
  63. const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
  64. {
  65. switch (status) {
  66. case GUC_FIRMWARE_FAIL:
  67. return "FAIL";
  68. case GUC_FIRMWARE_NONE:
  69. return "NONE";
  70. case GUC_FIRMWARE_PENDING:
  71. return "PENDING";
  72. case GUC_FIRMWARE_SUCCESS:
  73. return "SUCCESS";
  74. default:
  75. return "UNKNOWN!";
  76. }
  77. };
/*
 * Route all GT interrupts back to the host and stop the command streamers
 * forwarding interrupts/vblank to the GuC. Called before (re)loading the
 * firmware and on every failure path, so the host always owns interrupt
 * delivery whenever the GuC is not known to be running.
 */
static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, irqs;

	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MODE_GEN7(ring), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);
}
/*
 * Hand GT interrupt routing over to the GuC: the command streamers forward
 * interrupts and vblank to the GuC, and all GT interrupts except the render
 * USER_INTERRUPT are steered to the GuC. Only called after the firmware has
 * loaded successfully and GuC submission has been enabled.
 */
static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, irqs;

	/* tell all command streamers to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MODE_GEN7(ring), irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
}
/* GT-type field for GUC_CTL_DEVICE_INFO; currently always 0. */
static u32 get_gttype(struct drm_i915_private *dev_priv)
{
	/* XXX: GT type based on PCI device ID? field seems unused by fw */
	return 0;
}
  114. static u32 get_core_family(struct drm_i915_private *dev_priv)
  115. {
  116. switch (INTEL_INFO(dev_priv)->gen) {
  117. case 9:
  118. return GFXCORE_FAMILY_GEN9;
  119. default:
  120. DRM_ERROR("GUC: unsupported core family\n");
  121. return GFXCORE_FAMILY_UNKNOWN;
  122. }
  123. }
/*
 * Assemble the GUC_CTL_* parameter dwords and publish them through the
 * SOFT_SCRATCH registers, where the firmware reads them during boot.
 * Must run before the DMA transfer is kicked off in guc_ucode_xfer_dma().
 */
static void set_guc_init_params(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(&params, 0, sizeof(params));

	params[GUC_CTL_DEVICE_INFO] |=
		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);

	/*
	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
	 * second. This ARAT is calculated by:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	/* scheduler disabled by default; re-enabled below if submitting via GuC */
	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
			GUC_CTL_VCS2_ENABLED;

	if (i915.guc_log_level >= 0) {
		params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
		params[GUC_CTL_DEBUG] =
			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
	}

	/* If GuC submission is enabled, set up additional parameters here */
	if (i915.enable_guc_submission) {
		u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;

		/* context pool base is passed as a page number */
		pgs >>= PAGE_SHIFT;
		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);

		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;

		/* Unmask this bit to enable the GuC's internal scheduler */
		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
	}

	/* SOFT_SCRATCH(0) is cleared; parameter dwords start at SOFT_SCRATCH(1) */
	I915_WRITE(SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
}
  163. /*
  164. * Read the GuC status register (GUC_STATUS) and store it in the
  165. * specified location; then return a boolean indicating whether
  166. * the value matches either of two values representing completion
  167. * of the GuC boot process.
  168. *
  169. * This is used for polling the GuC status in a wait_for_atomic()
  170. * loop below.
  171. */
  172. static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
  173. u32 *status)
  174. {
  175. u32 val = I915_READ(GUC_STATUS);
  176. u32 uk_val = val & GS_UKERNEL_MASK;
  177. *status = val;
  178. return (uk_val == GS_UKERNEL_READY ||
  179. ((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
  180. }
  181. /*
  182. * Transfer the firmware image to RAM for execution by the microcontroller.
  183. *
  184. * GuC Firmware layout:
  185. * +-------------------------------+ ----
  186. * | CSS header | 128B
  187. * | contains major/minor version |
  188. * +-------------------------------+ ----
  189. * | uCode |
  190. * +-------------------------------+ ----
  191. * | RSA signature | 256B
  192. * +-------------------------------+ ----
  193. *
  194. * Architecturally, the DMA engine is bidirectional, and can potentially even
  195. * transfer between GTT locations. This functionality is left out of the API
  196. * for now as there is no need for it.
  197. *
  198. * Note that GuC needs the CSS header plus uKernel code to be copied by the
  199. * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
  200. */
  201. #define UOS_CSS_HEADER_OFFSET 0
  202. #define UOS_VER_MINOR_OFFSET 0x44
  203. #define UOS_VER_MAJOR_OFFSET 0x46
  204. #define UOS_CSS_HEADER_SIZE 0x80
  205. #define UOS_RSA_SIG_SIZE 0x100
/*
 * Program the GuC DMA engine: copy the RSA signature into the
 * UOS_RSA_SCRATCH registers via MMIO, DMA the CSS header + uKernel into
 * WOPCM, then spin-wait for the GuC to report boot completion.
 *
 * Caller must hold forcewake (taken in guc_ucode_xfer()).
 *
 * Return: 0 on success; non-zero from wait_for_atomic() if boot never
 * completes within the timeout; -ENOEXEC if the BootROM rejected the
 * RSA signature.
 */
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
	unsigned long offset;
	struct sg_table *sg = fw_obj->pages;
	u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)];
	int i, ret = 0;

	/* uCode size, also is where RSA signature starts */
	offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE;
	I915_WRITE(DMA_COPY_SIZE, ucode_size);

	/* Copy RSA signature from the fw image to HW for verification */
	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset);
	for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++)
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);

	/* Set the source address for the new blob */
	offset = i915_gem_obj_ggtt_offset(fw_obj);
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	/* high register only holds the top 16 bits of the address */
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/*
	 * Set the DMA destination. Current uCode expects the code to be
	 * loaded at 8k; locations below this are used for the stack.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	/*
	 * Spin-wait for the DMA to complete & the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms, so a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver will attempt to fall back to
	 * execlist mode if this happens.)
	 */
	ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);

	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
			I915_READ(DMA_CTRL), status);

	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	DRM_DEBUG_DRIVER("returning %d\n", ret);

	return ret;
}
  251. /*
  252. * Load the GuC firmware blob into the MinuteIA.
  253. */
/*
 * Load the GuC firmware blob into the MinuteIA.
 *
 * Pins the firmware object into the GGTT, applies the platform workarounds
 * and WOPCM/shim setup the GuC needs, publishes the init parameters and
 * then performs the actual DMA transfer. The register sequence below is
 * order-sensitive; keep it as written.
 *
 * Return: 0 on success, or a negative error code from set-domain, pinning
 * or the DMA/boot step.
 */
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct drm_device *dev = dev_priv->dev;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
	if (ret) {
		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
		return ret;
	}

	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* init WOPCM */
	I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
	I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);

	/* Enable MIA caching. GuC clock gating is disabled. */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

	/* WaDisableMinuteIaClockGating:skl,bxt */
	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
	    (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) {
		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
					      ~GUC_ENABLE_MIA_CLOCK_GATING));
	}

	/* WaC6DisallowByGfxPause*/
	I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

	/* doorbell enable lives in a different register on Broxton */
	if (IS_BROXTON(dev))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}

	set_guc_init_params(dev_priv);

	ret = guc_ucode_xfer_dma(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/*
	 * We keep the object pages for reuse during resume. But we can unpin it
	 * now that DMA has completed, so it doesn't continue to take up space.
	 */
	i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);

	return ret;
}
/**
 * intel_guc_ucode_load() - load GuC uCode into the device
 * @dev: drm device
 *
 * Called from gem_init_hw() during driver loading and also after a GPU reset.
 *
 * The firmware image should have already been fetched into memory by the
 * earlier call to intel_guc_ucode_init(), so here we need only check that
 * it succeeded, and then transfer the image to the h/w.
 *
 * Return: non-zero code on error
 */
int intel_guc_ucode_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	int err = 0;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	/* host owns interrupts until the GuC is up and submission is enabled */
	direct_interrupts_to_host(dev_priv);

	/* NONE fetch status means no GuC firmware for this platform: not an error */
	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
		return 0;

	/* a previous load attempt already failed; don't retry */
	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
	    guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
		return -ENOEXEC;

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;

	DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));

	switch (guc_fw->guc_fw_fetch_status) {
	case GUC_FIRMWARE_FAIL:
		/* something went wrong :( */
		err = -EIO;
		goto fail;

	case GUC_FIRMWARE_NONE:
	case GUC_FIRMWARE_PENDING:
	default:
		/* "can't happen" */
		WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
			guc_fw->guc_fw_path,
			intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
			guc_fw->guc_fw_fetch_status);
		err = -ENXIO;
		goto fail;

	case GUC_FIRMWARE_SUCCESS:
		break;
	}

	err = i915_guc_submission_init(dev);
	if (err)
		goto fail;

	err = guc_ucode_xfer(dev_priv);
	if (err)
		goto fail;

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	if (i915.enable_guc_submission) {
		/* The execbuf_client will be recreated. Release it first. */
		i915_guc_submission_disable(dev);

		err = i915_guc_submission_enable(dev);
		if (err)
			goto fail;

		/* only reroute interrupts once submission is fully up */
		direct_interrupts_to_guc(dev_priv);
	}

	return 0;

fail:
	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;

	/* undo any partial setup so the driver can fall back to execlists */
	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_disable(dev);

	return err;
}
  379. static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
  380. {
  381. struct drm_i915_gem_object *obj;
  382. const struct firmware *fw;
  383. const u8 *css_header;
  384. const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE;
  385. const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
  386. - 0x8000; /* 32k reserved (8K stack + 24k context) */
  387. int err;
  388. DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
  389. intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
  390. err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
  391. if (err)
  392. goto fail;
  393. if (!fw)
  394. goto fail;
  395. DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
  396. guc_fw->guc_fw_path, fw);
  397. DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
  398. fw->size, minsize, maxsize);
  399. /* Check the size of the blob befoe examining buffer contents */
  400. if (fw->size < minsize || fw->size > maxsize)
  401. goto fail;
  402. /*
  403. * The GuC firmware image has the version number embedded at a well-known
  404. * offset within the firmware blob; note that major / minor version are
  405. * TWO bytes each (i.e. u16), although all pointers and offsets are defined
  406. * in terms of bytes (u8).
  407. */
  408. css_header = fw->data + UOS_CSS_HEADER_OFFSET;
  409. guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET);
  410. guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);
  411. if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
  412. guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
  413. DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
  414. guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
  415. guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
  416. err = -ENOEXEC;
  417. goto fail;
  418. }
  419. DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
  420. guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
  421. guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
  422. mutex_lock(&dev->struct_mutex);
  423. obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
  424. mutex_unlock(&dev->struct_mutex);
  425. if (IS_ERR_OR_NULL(obj)) {
  426. err = obj ? PTR_ERR(obj) : -ENOMEM;
  427. goto fail;
  428. }
  429. guc_fw->guc_fw_obj = obj;
  430. guc_fw->guc_fw_size = fw->size;
  431. DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
  432. guc_fw->guc_fw_obj);
  433. release_firmware(fw);
  434. guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
  435. return;
  436. fail:
  437. DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
  438. err, fw, guc_fw->guc_fw_obj);
  439. DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
  440. guc_fw->guc_fw_path, err);
  441. obj = guc_fw->guc_fw_obj;
  442. if (obj)
  443. drm_gem_object_unreference(&obj->base);
  444. guc_fw->guc_fw_obj = NULL;
  445. release_firmware(fw); /* OK even if fw is NULL */
  446. guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
  447. }
  448. /**
  449. * intel_guc_ucode_init() - define parameters and fetch firmware
  450. * @dev: drm device
  451. *
  452. * Called early during driver load, but after GEM is initialised.
  453. *
  454. * The firmware will be transferred to the GuC's memory later,
  455. * when intel_guc_ucode_load() is called.
  456. */
  457. void intel_guc_ucode_init(struct drm_device *dev)
  458. {
  459. struct drm_i915_private *dev_priv = dev->dev_private;
  460. struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
  461. const char *fw_path;
  462. if (!HAS_GUC_SCHED(dev))
  463. i915.enable_guc_submission = false;
  464. if (!HAS_GUC_UCODE(dev)) {
  465. fw_path = NULL;
  466. } else if (IS_SKYLAKE(dev)) {
  467. fw_path = I915_SKL_GUC_UCODE;
  468. guc_fw->guc_fw_major_wanted = 4;
  469. guc_fw->guc_fw_minor_wanted = 3;
  470. } else {
  471. i915.enable_guc_submission = false;
  472. fw_path = ""; /* unknown device */
  473. }
  474. guc_fw->guc_dev = dev;
  475. guc_fw->guc_fw_path = fw_path;
  476. guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
  477. guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
  478. if (fw_path == NULL)
  479. return;
  480. if (*fw_path == '\0') {
  481. DRM_ERROR("No GuC firmware known for this platform\n");
  482. guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
  483. return;
  484. }
  485. guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
  486. DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
  487. guc_fw_fetch(dev, guc_fw);
  488. /* status must now be FAIL or SUCCESS */
  489. }
/**
 * intel_guc_ucode_fini() - clean up all allocated resources
 * @dev: drm device
 *
 * Reverts interrupt routing to the host, tears down the submission state,
 * and releases the firmware GEM object (under struct_mutex).
 */
void intel_guc_ucode_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;

	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_fini(dev);

	mutex_lock(&dev->struct_mutex);
	if (guc_fw->guc_fw_obj)
		drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
	guc_fw->guc_fw_obj = NULL;
	mutex_unlock(&dev->struct_mutex);

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
}