intel_uncore.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read FREE_ENTRIES every time. */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

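/* Timer callback for the deferred forcewake release: drop the reference that
 * was kept when the timer was armed and, once the domain's wake count reaches
 * zero, let the hardware power the domain down again. */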
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

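/* Flush any pending release timers, drop whatever forcewake is still asserted
 * in hardware and reset the domain registers; with @restore set, re-assert the
 * domains for which references were still held. */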
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of the
 * sequence. The reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		/* Keep one reference and arm the release timer instead of
		 * dropping the hardware forcewake immediately. */
		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

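/* FPGA_DBG based detection of "unclaimed" MMIO accesses on HSW+: the _debug
 * helper warns around an individual access when i915.mmio_debug is set, while
 * the _detect helper arms one-shot reporting the first time an unclaimed
 * access is seen without the module parameter. */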
static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug--; /* Only report the first N failures */
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug = mmio_debug_once;
		mmio_debug_once = false;
	}
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read
#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

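/* Take a deferred reference on each requested domain that is not already
 * awake, arming its release timer, and then ask the hardware to wake whatever
 * still needs it. Called with uncore.lock held by the accessors below. */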
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (!SKL_NEEDS_FORCE_WAKE(reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(reg)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(reg)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

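/* Registers in this list are treated as shadowed on gen8: writes to them skip
 * the forcewake handling in the write accessors below. */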
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (!SKL_NEEDS_FORCE_WAKE(reg) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

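/* Initialise a single forcewake domain: record its set/ack registers, pick the
 * set/clear/reset values for the platform, set up the release timer and mark
 * the domain as present in uncore.fw_domains. */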
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

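/* Select the forcewake get/put callbacks and register the forcewake domains
 * present on this platform: render only on gen6/7 and HSW/BDW, render+media
 * on VLV/CHV, render+media+blitter on gen9. */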
static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */
		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

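/* Whitelist of registers userspace may read via the reg_read ioctl, along with
 * their access width and the gens on which they are readable. */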
#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	u64 offset;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset = entry->offset;
	size = entry->size;
	size |= reg->offset ^ offset;

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset, offset+4);
		break;
	case 8:
		reg->val = I915_READ64(offset);
		break;
	case 4:
		reg->val = I915_READ(offset);
		break;
	case 2:
		reg->val = I915_READ16(offset);
		break;
	case 1:
		reg->val = I915_READ8(offset);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

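/* Gen8+ reset: ask each engine to quiesce via RING_RESET_CTL and wait for it
 * to report ready before issuing the gen6-style full soft reset; on timeout,
 * withdraw the requests and bail out. */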
static int wait_for_register(struct drm_i915_private *dev_priv,
			     const u32 reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}

static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	else if (IS_GEN5(dev))
		return ironlake_do_reset;
	else if (IS_G4X(dev))
		return g4x_do_reset;
	else if (IS_G33(dev))
		return g33_do_reset;
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int (*reset)(struct drm_device *);
	int ret;

	reset = intel_get_gpu_reset(dev);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
	return intel_get_gpu_reset(dev) != NULL;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}