/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */

/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck &&
			intel_has_gpu_reset(dev);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}
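
/*
 * Illustrative sketch (not part of the driver): userspace reaches
 * i915_getparam() through the DRM_IOCTL_I915_GETPARAM ioctl, roughly as
 * libdrm does. The fd below is a hypothetical already-opened DRM device
 * node, included only to show the calling convention:
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &value,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("PCI device id: 0x%04x\n", value);
 */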

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
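
/*
 * Layout note (summarizing how the code below uses these registers; the
 * authoritative bit definitions live in the chipset docs): on gen4+ the
 * MCHBAR base is a 64-bit value in the bridge's config space, low dword
 * at MCHBAR_I965 and high dword at MCHBAR_I965 + 4, with bit 0 of the
 * low dword acting as the enable bit. On I915G/I915GM the enable bit
 * lives in DEVEN_REG instead (DEVEN_MCHBAR_EN), and the decoded window
 * is MCHBAR_SIZE bytes either way.
 */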

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Set up MCHBAR if possible; record in mchbar_need_disable whether we
 * should disable it again on teardown. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
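
/*
 * Flow note (summarizing the callbacks above): vga_switcheroo calls
 * .can_switch to check that no DRM clients hold the device open, then
 * drives .set_gpu_state with VGA_SWITCHEROO_ON/OFF, which we map onto
 * the driver's ordinary resume/suspend paths. .reprobe is left NULL
 * since there is presumably nothing to re-detect after a switch.
 */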

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have more than one VGA card, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_ucode_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (otherwise they report a ghost connected connector
	 * status), e.g. VGA on gm45. Hence we can only set up the initial
	 * fbdev config after hpd irqs are fully enabled. Ideally we would
	 * scan for the initial config only once hotplug handling is enabled,
	 * but due to screwed-up locking around kms/fbdev init we can't
	 * protect the fbdev initial config scanning against hotplug events.
	 * Hence do this first and ignore the tiny window where we will lose
	 * hotplug notifications.
	 */
	async_schedule(intel_fbdev_initial_config, dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	intel_guc_ucode_fini(dev);
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif
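
/*
 * Note on the takeover above: console_lock() must be held across console
 * driver (un)registration. do_take_over_console() rebinds the whole
 * console range (0 .. MAX_NR_CONSOLES - 1) to dummy_con first, so that
 * vga_con can then be unregistered without leaving any console unbound.
 */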

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
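
/*
 * Expansion sketch for the X-macro trick above (illustrative, abridged):
 * DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY) expands to one "%s" per
 * device flag with no separator, extending the format string, while
 * DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA) expands to the matching
 * comma-separated arguments, e.g.
 *
 *	info->is_mobile ? "is_mobile," : "",
 *	info->is_i85x ? "is_i85x," : "",
 *	...
 *
 * The flag names here are just examples; the authoritative list is the
 * DEV_INFO_FOR_EACH_FLAG definition in i915_drv.h.
 */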

static void cherryview_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	u32 fuse, eu_dis;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse = I915_READ(CHV_FUSE_GT);

	info->slice_total = 1;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	info->subslice_total = info->subslice_per_slice;
	/*
	 * CHV is expected to always have a uniform distribution of EUs
	 * across subslices.
	 */
	info->eu_per_subslice = info->subslice_total ?
				info->eu_total / info->subslice_total :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	info->has_slice_pg = 0;
	info->has_subslice_pg = (info->subslice_total > 1);
	info->has_eu_pg = (info->eu_per_subslice > 2);
}
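
/*
 * Worked example with a hypothetical fuse value: if SS1 is fused off
 * (CHV_FGT_DISABLE_SS1 set) and two EU-disable bits are set in the SS0
 * fields, the code above yields subslice_total = 1 and
 * eu_total = 8 - hweight32(eu_dis) = 6; hence eu_per_subslice = 6,
 * has_subslice_pg = false (only one subslice) and has_eu_pg = true
 * (more than one EU pair per subslice).
 */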

static void gen9_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, s_enable, ss_disable, eu_disable;
	u8 eu_mask = 0xff;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
		   GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
		     GEN9_F2_SS_DIS_SHIFT;

	info->slice_total = hweight32(s_enable);
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total *
			       info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) have 7 EUs. We
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EUs across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total,
					     info->subslice_total) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
	info->has_eu_pg = (info->eu_per_subslice > 2);
}

static void broadwell_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	info = (struct intel_device_info *)&dev_priv->info;
	info->slice_total = hweight32(s_enable);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total * info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	info->has_slice_pg = (info->slice_total > 1);
	info->has_subslice_pg = 0;
	info->has_eu_pg = 0;
}
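
/*
 * Note on the bit-stitching above: on BDW the per-EU disable fuses form
 * one packed field of eu_max (8) bits per subslice and ss_max (3)
 * subslices per slice, spread across the three GEN8_EU_DISABLE
 * registers. Each eu_disable[s] therefore reassembles slice s's 24-bit
 * chunk by combining the tail of one register with the head of the
 * next, which is why the shifts use (32 - GEN8_EU_DIS0_S1_SHIFT)-style
 * widths.
 */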

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. reading fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been set up, as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   !IS_VALLEYVIEW(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		}
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev))
		cherryview_sseu_info_init(dev);
	else if (IS_BROADWELL(dev))
		broadwell_sseu_info_init(dev);
	else if (INTEL_INFO(dev)->gen >= 9)
		gen9_sseu_info_init(dev);

	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	if (!IS_VALLEYVIEW(dev_priv))
		return;

	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D).
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C).
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}
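
/*
 * Usage note: the DPIO_PHY_IOSF_PORT() mapping initialized here is
 * consumed later by the sideband helpers (e.g. vlv_dpio_read()/
 * vlv_dpio_write() in intel_sideband.c) to address the right PHY over
 * IOSF when programming DP/HDMI PLLs and lane settings.
 */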

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->csr_lock);
	mutex_init(&dev_priv->av_mutex);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	/* Load CSR Firmware for SKL */
	intel_csr_ucode_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_freecsr;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_gtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_gtt;
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL) {
		DRM_ERROR("Failed to create our hangcheck workqueue.\n");
		ret = -ENOMEM;
		goto out_freedpwq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_opregion_setup(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	intel_init_dpio(dev_priv);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_power_well;
	}

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	i915_audio_component_init(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
	destroy_workqueue(dev_priv->hotplug.dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_freecsr:
	intel_csr_ucode_fini(dev);
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
	kfree(dev_priv);
	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915_audio_component_cleanup(dev_priv);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * Free the memory space allocated for the child device
	 * config parsed from VBT.
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	intel_guc_ucode_fini(dev);
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	intel_fbc_cleanup_cfb(dev_priv);
	i915_gem_cleanup_stolen(dev);

	intel_csr_ucode_fini(dev);

	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_preclose(dev, file);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (file_priv && file_priv->bsd_ring)
		file_priv->bsd_ring = NULL;
	kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}
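
/*
 * In the table below, DRM_AUTH requires an authenticated client,
 * DRM_MASTER/DRM_ROOT_ONLY restrict an ioctl to the display master or
 * root, and DRM_RENDER_ALLOW additionally exposes it on render nodes.
 * Legacy UMS entry points are wired to drm_noop (or, for pin/unpin,
 * i915_gem_reject_pin_ioctl) so that old userspace fails cleanly with
 * an error instead of touching the hardware.
 */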
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);