atom.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457
  1. /*
  2. * Copyright 2008 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Author: Stanislaw Skowronek
  23. */
  24. #include <linux/module.h>
  25. #include <linux/sched.h>
  26. #include <linux/slab.h>
  27. #include <asm/unaligned.h>
  28. #define ATOM_DEBUG
  29. #include "atom.h"
  30. #include "atom-names.h"
  31. #include "atom-bits.h"
  32. #include "amdgpu.h"
/* Condition codes carried by the conditional JUMP opcodes (see atom_op_jump). */
#define ATOM_COND_ABOVE 0
#define ATOM_COND_ABOVEOREQUAL 1
#define ATOM_COND_ALWAYS 2
#define ATOM_COND_BELOW 3
#define ATOM_COND_BELOWOREQUAL 4
#define ATOM_COND_EQUAL 5
#define ATOM_COND_NOTEQUAL 6
/* Port selectors for the SET_PORT opcode (see atom_op_setport). */
#define ATOM_PORT_ATI 0
#define ATOM_PORT_PCI 1
#define ATOM_PORT_SYSIO 2
/* Time units for the DELAY opcode (see atom_op_delay). */
#define ATOM_UNIT_MICROSEC 0
#define ATOM_UNIT_MILLISEC 1
/* Register dword offsets; presumably the MM index/data pair used for PLL
 * access — not referenced in this chunk, confirm against the rest of file. */
#define PLL_INDEX 2
#define PLL_DATA 3
/*
 * Per-invocation execution state for one AtomBIOS command-table run.
 * One of these exists per (possibly nested) table call.
 */
typedef struct {
	struct atom_context *ctx;	/* shared card/BIOS context */
	uint32_t *ps, *ws;		/* parameter space and workspace arrays */
	int ps_shift;			/* offset into ps handed to called tables */
	uint16_t start;			/* table base; jump targets and data_block 255 are relative to it */
	unsigned last_jump;		/* last jump target taken (loop watchdog, see atom_op_jump) */
	unsigned long last_jump_jiffies; /* when that target was first jumped to */
	bool abort;			/* set on error/timeout to stop execution */
} atom_exec_context;
/* Non-zero enables the DEBUG()/SDEBUG() trace output defined below. */
int amdgpu_atom_debug = 0;
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);

/*
 * Lane masks indexed by the ATOM_SRC_* alignment code:
 * dword, three word positions, four byte positions of a 32-bit value.
 */
static uint32_t atom_arg_mask[8] =
    { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
	0xFF000000 };
/* Right shift that brings each masked lane down to bit 0. */
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
/* Default destination alignment used by opcodes that carry none (CLEAR, shifts). */
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
/* Current SDEBUG indentation depth (nesting level of table calls). */
static int debug_depth = 0;
#ifdef ATOM_DEBUG
/* Print n spaces so a trace line is indented to the current call depth. */
static void debug_print_spaces(int n)
{
	while (n--)
		printk(" ");
}

/* DEBUG: raw trace line; SDEBUG: trace line indented by debug_depth.
 * Both are no-ops unless amdgpu_atom_debug is set. */
#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif
/*
 * Run one indirect-IO (IIO) program stored in the BIOS image at 'base'.
 * The program operates on a single 32-bit accumulator 'temp'; 'index' and
 * 'data' are the values the caller wants merged in (MOVE_INDEX/MOVE_DATA).
 * Returns the accumulator at ATOM_IIO_END, or 0 on an unknown opcode.
 */
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	uint32_t temp = 0xCDCDCDCD;	/* poison: visible if used before a READ */

	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			/* temp = ioreg[imm16] */
			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			/* ioreg[imm16] = temp */
			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			/* clear CU8(base+1) bits of temp starting at bit CU8(base+2) */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			/* set CU8(base+1) bits of temp starting at bit CU8(base+2) */
			temp |=
			    (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			/* copy a bit field of 'index' into a field of temp */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((index >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			/* copy a bit field of 'data' into a field of temp */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((data >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			/* copy a bit field of ctx->io_attr into a field of temp */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((ctx->io_attr >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			printk(KERN_INFO "Unknown IIO opcode.\n");
			return 0;
		}
}
/*
 * Decode and fetch one source operand of the current opcode.
 * 'attr' packs the argument type (low 3 bits: register, parameter space,
 * workspace, data-table, framebuffer scratch, immediate, PLL, MC) and the
 * byte/word/dword alignment within a 32-bit value (next 3 bits).  *ptr
 * walks the bytecode and is advanced past the operand encoding.  If
 * 'saved' is non-NULL the full unmasked dword is stored there so a later
 * atom_put_dst() can restore the bits outside the selected lane.
 * 'print' enables the DEBUG trace.  Returns the lane value shifted down
 * to bit 0 (immediates are returned as read, without lane masking).
 */
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;

	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;	/* registers are relative to the current block */
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			printk(KERN_INFO
			       "PCI registers are not implemented.\n");
			return 0;
		case ATOM_IO_SYSIO:
			printk(KERN_INFO
			       "SYSIO registers are not implemented.\n");
			return 0;
		default:
			/* remaining modes are indirect IO: 0x80 | iio program index */
			if (!(gctx->io_mode & 0x80)) {
				printk(KERN_INFO "Bad IO mode.\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				printk(KERN_INFO
				       "Undefined indirect IO read method %d.\n",
				       gctx->io_mode & 0x7F);
				return 0;
			}
			val =
			    atom_iio_execute(gctx,
					     gctx->iio[gctx->io_mode & 0x7F],
					     idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha. */
		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		/* the low workspace indices alias interpreter registers */
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		/* read from the BIOS data tables, relative to data_block */
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		/* bounds-check access to the scratch (fb window) area */
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		/* immediates return early: no lane masking is applied */
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;	/* full dword, before lane extraction */
	/* extract the requested byte/word lane */
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}
  345. static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
  346. {
  347. uint32_t align = (attr >> 3) & 7, arg = attr & 7;
  348. switch (arg) {
  349. case ATOM_ARG_REG:
  350. case ATOM_ARG_ID:
  351. (*ptr) += 2;
  352. break;
  353. case ATOM_ARG_PLL:
  354. case ATOM_ARG_MC:
  355. case ATOM_ARG_PS:
  356. case ATOM_ARG_WS:
  357. case ATOM_ARG_FB:
  358. (*ptr)++;
  359. break;
  360. case ATOM_ARG_IMM:
  361. switch (align) {
  362. case ATOM_SRC_DWORD:
  363. (*ptr) += 4;
  364. return;
  365. case ATOM_SRC_WORD0:
  366. case ATOM_SRC_WORD8:
  367. case ATOM_SRC_WORD16:
  368. (*ptr) += 2;
  369. return;
  370. case ATOM_SRC_BYTE0:
  371. case ATOM_SRC_BYTE8:
  372. case ATOM_SRC_BYTE16:
  373. case ATOM_SRC_BYTE24:
  374. (*ptr)++;
  375. return;
  376. }
  377. return;
  378. }
  379. }
/* Fetch a source operand with tracing enabled and no saved full-dword copy. */
static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}
/*
 * Fetch an immediate value straight from the bytecode with an explicit
 * alignment code, bypassing the attr-byte decoding of atom_get_src_int().
 * Advances *ptr by the operand size (4, 2 or 1 bytes).
 */
static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
{
	uint32_t val = 0xCDCDCDCD;	/* poison for an unknown alignment code */

	switch (align) {
	case ATOM_SRC_DWORD:
		val = U32(*ptr);
		(*ptr) += 4;
		break;
	case ATOM_SRC_WORD0:
	case ATOM_SRC_WORD8:
	case ATOM_SRC_WORD16:
		val = U16(*ptr);
		(*ptr) += 2;
		break;
	case ATOM_SRC_BYTE0:
	case ATOM_SRC_BYTE8:
	case ATOM_SRC_BYTE16:
	case ATOM_SRC_BYTE24:
		val = U8(*ptr);
		(*ptr)++;
		break;
	}
	return val;
}
/*
 * Fetch a destination operand: translate its alignment field (attr bits
 * [5:3] plus size bits [7:6]) into the source alignment encoding via
 * atom_dst_to_src and reuse the source fetch path.
 */
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	return atom_get_src_int(ctx,
				arg | atom_dst_to_src[(attr >> 3) &
						      7][(attr >> 6) & 3] << 3,
				ptr, saved, print);
}
/* Skip a destination operand, using the same dst->src alignment translation
 * as atom_get_dst(). */
static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	atom_skip_src_int(ctx,
			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
								 3] << 3, ptr);
}
/*
 * Write 'val' into the destination operand described by arg/attr.
 * 'saved' must hold the destination's original full dword (as produced by
 * atom_get_dst()): the new value is shifted into its byte/word lane and
 * the remaining bits are restored from 'saved' so partial-width writes do
 * not clobber neighbouring fields.  *ptr is advanced past the operand.
 */
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
{
	uint32_t align =
	    atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
	    val, idx;
	struct atom_context *gctx = ctx->ctx;

	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];	/* for tracing only */
	/* merge the new lane value into the saved dword */
	val <<= atom_arg_shift[align];
	val &= atom_arg_mask[align];
	saved &= ~atom_arg_mask[align];
	val |= saved;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;	/* registers are relative to the current block */
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			if (idx == 0)
				/* register 0 gets the value scaled by 4
				 * (dword index -> byte offset); NOTE(review):
				 * presumably the MM index register — confirm
				 * against the register map */
				gctx->card->reg_write(gctx->card, idx,
						      val << 2);
			else
				gctx->card->reg_write(gctx->card, idx, val);
			break;
		case ATOM_IO_PCI:
			printk(KERN_INFO
			       "PCI registers are not implemented.\n");
			return;
		case ATOM_IO_SYSIO:
			printk(KERN_INFO
			       "SYSIO registers are not implemented.\n");
			return;
		default:
			if (!(gctx->io_mode & 0x80)) {
				printk(KERN_INFO "Bad IO mode.\n");
				return;
			}
			/* NOTE(review): indexes iio with 0xFF here while the
			 * read path in atom_get_src_int() uses 0x7F — verify
			 * the iio table is meant to be indexed differently
			 * for writes */
			if (!gctx->iio[gctx->io_mode & 0xFF]) {
				printk(KERN_INFO
				       "Undefined indirect IO write method %d.\n",
				       gctx->io_mode & 0x7F);
				return;
			}
			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
					 idx, val);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PS[0x%02X]", idx);
		ctx->ps[idx] = cpu_to_le32(val);	/* ps is stored little-endian */
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("WS[0x%02X]", idx);
		/* the low workspace indices alias interpreter registers */
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			gctx->divmul[0] = val;
			break;
		case ATOM_WS_REMAINDER:
			gctx->divmul[1] = val;
			break;
		case ATOM_WS_DATAPTR:
			gctx->data_block = val;
			break;
		case ATOM_WS_SHIFT:
			gctx->shift = val;
			break;
		case ATOM_WS_OR_MASK:
		case ATOM_WS_AND_MASK:
			/* derived read-only values; writes are discarded */
			break;
		case ATOM_WS_FB_WINDOW:
			gctx->fb_base = val;
			break;
		case ATOM_WS_ATTRIBUTES:
			gctx->io_attr = val;
			break;
		case ATOM_WS_REGPTR:
			gctx->reg_block = val;
			break;
		default:
			ctx->ws[idx] = val;
		}
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		/* bounds-check the scratch (fb window) write */
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
		} else
			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
		DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PLL[0x%02X]", idx);
		gctx->card->pll_write(gctx->card, idx, val);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("MC[0x%02X]", idx);
		gctx->card->mc_write(gctx->card, idx, val);
		return;	/* skips the trace switch below */
	}
	/* trace the lane value that was written */
	switch (align) {
	case ATOM_SRC_DWORD:
		DEBUG(".[31:0] <- 0x%08X\n", old_val);
		break;
	case ATOM_SRC_WORD0:
		DEBUG(".[15:0] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD8:
		DEBUG(".[23:8] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD16:
		DEBUG(".[31:16] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_BYTE0:
		DEBUG(".[7:0] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE8:
		DEBUG(".[15:8] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE16:
		DEBUG(".[23:16] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE24:
		DEBUG(".[31:24] <- 0x%02X\n", old_val);
		break;
	}
}
  560. static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
  561. {
  562. uint8_t attr = U8((*ptr)++);
  563. uint32_t dst, src, saved;
  564. int dptr = *ptr;
  565. SDEBUG(" dst: ");
  566. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  567. SDEBUG(" src: ");
  568. src = atom_get_src(ctx, attr, ptr);
  569. dst += src;
  570. SDEBUG(" dst: ");
  571. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  572. }
  573. static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
  574. {
  575. uint8_t attr = U8((*ptr)++);
  576. uint32_t dst, src, saved;
  577. int dptr = *ptr;
  578. SDEBUG(" dst: ");
  579. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  580. SDEBUG(" src: ");
  581. src = atom_get_src(ctx, attr, ptr);
  582. dst &= src;
  583. SDEBUG(" dst: ");
  584. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  585. }
  586. static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
  587. {
  588. printk("ATOM BIOS beeped!\n");
  589. }
  590. static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
  591. {
  592. int idx = U8((*ptr)++);
  593. int r = 0;
  594. if (idx < ATOM_TABLE_NAMES_CNT)
  595. SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
  596. else
  597. SDEBUG(" table: %d\n", idx);
  598. if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
  599. r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
  600. if (r) {
  601. ctx->abort = true;
  602. }
  603. }
  604. static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
  605. {
  606. uint8_t attr = U8((*ptr)++);
  607. uint32_t saved;
  608. int dptr = *ptr;
  609. attr &= 0x38;
  610. attr |= atom_def_dst[attr >> 3] << 6;
  611. atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
  612. SDEBUG(" dst: ");
  613. atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
  614. }
  615. static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
  616. {
  617. uint8_t attr = U8((*ptr)++);
  618. uint32_t dst, src;
  619. SDEBUG(" src1: ");
  620. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  621. SDEBUG(" src2: ");
  622. src = atom_get_src(ctx, attr, ptr);
  623. ctx->ctx->cs_equal = (dst == src);
  624. ctx->ctx->cs_above = (dst > src);
  625. SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
  626. ctx->ctx->cs_above ? "GT" : "LE");
  627. }
  628. static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
  629. {
  630. unsigned count = U8((*ptr)++);
  631. SDEBUG(" count: %d\n", count);
  632. if (arg == ATOM_UNIT_MICROSEC)
  633. udelay(count);
  634. else if (!drm_can_sleep())
  635. mdelay(count);
  636. else
  637. msleep(count);
  638. }
  639. static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
  640. {
  641. uint8_t attr = U8((*ptr)++);
  642. uint32_t dst, src;
  643. SDEBUG(" src1: ");
  644. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  645. SDEBUG(" src2: ");
  646. src = atom_get_src(ctx, attr, ptr);
  647. if (src != 0) {
  648. ctx->ctx->divmul[0] = dst / src;
  649. ctx->ctx->divmul[1] = dst % src;
  650. } else {
  651. ctx->ctx->divmul[0] = 0;
  652. ctx->ctx->divmul[1] = 0;
  653. }
  654. }
  655. static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
  656. {
  657. uint64_t val64;
  658. uint8_t attr = U8((*ptr)++);
  659. uint32_t dst, src;
  660. SDEBUG(" src1: ");
  661. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  662. SDEBUG(" src2: ");
  663. src = atom_get_src(ctx, attr, ptr);
  664. if (src != 0) {
  665. val64 = dst;
  666. val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
  667. do_div(val64, src);
  668. ctx->ctx->divmul[0] = lower_32_bits(val64);
  669. ctx->ctx->divmul[1] = upper_32_bits(val64);
  670. } else {
  671. ctx->ctx->divmul[0] = 0;
  672. ctx->ctx->divmul[1] = 0;
  673. }
  674. }
/* END_OF_TABLE opcode handler. */
static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}
/*
 * JUMP family: conditionally transfer control within the table.  'arg'
 * selects the condition, evaluated against the cs_equal/cs_above flags
 * set by a previous COMPARE.  A watchdog aborts execution when the script
 * keeps jumping to the same target for more than 5 seconds, which catches
 * BIOS busy-wait loops that would otherwise hang the interpreter.
 */
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			/* same target as last time: check how long we've looped */
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > 5000)) {
					DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
					ctx->abort = true;
				}
			} else {
				/* jiffies wrap around we will just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			/* new target: restart the watchdog clock */
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		/* jump targets are encoded relative to the table start */
		*ptr = ctx->start + target;
	}
}
  730. static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
  731. {
  732. uint8_t attr = U8((*ptr)++);
  733. uint32_t dst, mask, src, saved;
  734. int dptr = *ptr;
  735. SDEBUG(" dst: ");
  736. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  737. mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
  738. SDEBUG(" mask: 0x%08x", mask);
  739. SDEBUG(" src: ");
  740. src = atom_get_src(ctx, attr, ptr);
  741. dst &= mask;
  742. dst |= src;
  743. SDEBUG(" dst: ");
  744. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  745. }
  746. static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
  747. {
  748. uint8_t attr = U8((*ptr)++);
  749. uint32_t src, saved;
  750. int dptr = *ptr;
  751. if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
  752. atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
  753. else {
  754. atom_skip_dst(ctx, arg, attr, ptr);
  755. saved = 0xCDCDCDCD;
  756. }
  757. SDEBUG(" src: ");
  758. src = atom_get_src(ctx, attr, ptr);
  759. SDEBUG(" dst: ");
  760. atom_put_dst(ctx, arg, attr, &dptr, src, saved);
  761. }
  762. static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
  763. {
  764. uint8_t attr = U8((*ptr)++);
  765. uint32_t dst, src;
  766. SDEBUG(" src1: ");
  767. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  768. SDEBUG(" src2: ");
  769. src = atom_get_src(ctx, attr, ptr);
  770. ctx->ctx->divmul[0] = dst * src;
  771. }
  772. static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
  773. {
  774. uint64_t val64;
  775. uint8_t attr = U8((*ptr)++);
  776. uint32_t dst, src;
  777. SDEBUG(" src1: ");
  778. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  779. SDEBUG(" src2: ");
  780. src = atom_get_src(ctx, attr, ptr);
  781. val64 = (uint64_t)dst * (uint64_t)src;
  782. ctx->ctx->divmul[0] = lower_32_bits(val64);
  783. ctx->ctx->divmul[1] = upper_32_bits(val64);
  784. }
/* NOP opcode handler. */
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}
  789. static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
  790. {
  791. uint8_t attr = U8((*ptr)++);
  792. uint32_t dst, src, saved;
  793. int dptr = *ptr;
  794. SDEBUG(" dst: ");
  795. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  796. SDEBUG(" src: ");
  797. src = atom_get_src(ctx, attr, ptr);
  798. dst |= src;
  799. SDEBUG(" dst: ");
  800. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  801. }
  802. static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
  803. {
  804. uint8_t val = U8((*ptr)++);
  805. SDEBUG("POST card output: 0x%02X\n", val);
  806. }
  807. static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
  808. {
  809. printk(KERN_INFO "unimplemented!\n");
  810. }
  811. static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
  812. {
  813. printk(KERN_INFO "unimplemented!\n");
  814. }
  815. static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
  816. {
  817. printk(KERN_INFO "unimplemented!\n");
  818. }
  819. static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
  820. {
  821. int idx = U8(*ptr);
  822. (*ptr)++;
  823. SDEBUG(" block: %d\n", idx);
  824. if (!idx)
  825. ctx->ctx->data_block = 0;
  826. else if (idx == 255)
  827. ctx->ctx->data_block = ctx->start;
  828. else
  829. ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
  830. SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
  831. }
  832. static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
  833. {
  834. uint8_t attr = U8((*ptr)++);
  835. SDEBUG(" fb_base: ");
  836. ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
  837. }
  838. static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
  839. {
  840. int port;
  841. switch (arg) {
  842. case ATOM_PORT_ATI:
  843. port = U16(*ptr);
  844. if (port < ATOM_IO_NAMES_CNT)
  845. SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
  846. else
  847. SDEBUG(" port: %d\n", port);
  848. if (!port)
  849. ctx->ctx->io_mode = ATOM_IO_MM;
  850. else
  851. ctx->ctx->io_mode = ATOM_IO_IIO | port;
  852. (*ptr) += 2;
  853. break;
  854. case ATOM_PORT_PCI:
  855. ctx->ctx->io_mode = ATOM_IO_PCI;
  856. (*ptr)++;
  857. break;
  858. case ATOM_PORT_SYSIO:
  859. ctx->ctx->io_mode = ATOM_IO_SYSIO;
  860. (*ptr)++;
  861. break;
  862. }
  863. }
  864. static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
  865. {
  866. ctx->ctx->reg_block = U16(*ptr);
  867. (*ptr) += 2;
  868. SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
  869. }
  870. static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
  871. {
  872. uint8_t attr = U8((*ptr)++), shift;
  873. uint32_t saved, dst;
  874. int dptr = *ptr;
  875. attr &= 0x38;
  876. attr |= atom_def_dst[attr >> 3] << 6;
  877. SDEBUG(" dst: ");
  878. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  879. shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
  880. SDEBUG(" shift: %d\n", shift);
  881. dst <<= shift;
  882. SDEBUG(" dst: ");
  883. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  884. }
  885. static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
  886. {
  887. uint8_t attr = U8((*ptr)++), shift;
  888. uint32_t saved, dst;
  889. int dptr = *ptr;
  890. attr &= 0x38;
  891. attr |= atom_def_dst[attr >> 3] << 6;
  892. SDEBUG(" dst: ");
  893. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  894. shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
  895. SDEBUG(" shift: %d\n", shift);
  896. dst >>= shift;
  897. SDEBUG(" dst: ");
  898. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  899. }
  900. static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
  901. {
  902. uint8_t attr = U8((*ptr)++), shift;
  903. uint32_t saved, dst;
  904. int dptr = *ptr;
  905. uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
  906. SDEBUG(" dst: ");
  907. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  908. /* op needs to full dst value */
  909. dst = saved;
  910. shift = atom_get_src(ctx, attr, ptr);
  911. SDEBUG(" shift: %d\n", shift);
  912. dst <<= shift;
  913. dst &= atom_arg_mask[dst_align];
  914. dst >>= atom_arg_shift[dst_align];
  915. SDEBUG(" dst: ");
  916. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  917. }
  918. static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
  919. {
  920. uint8_t attr = U8((*ptr)++), shift;
  921. uint32_t saved, dst;
  922. int dptr = *ptr;
  923. uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
  924. SDEBUG(" dst: ");
  925. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  926. /* op needs to full dst value */
  927. dst = saved;
  928. shift = atom_get_src(ctx, attr, ptr);
  929. SDEBUG(" shift: %d\n", shift);
  930. dst >>= shift;
  931. dst &= atom_arg_mask[dst_align];
  932. dst >>= atom_arg_shift[dst_align];
  933. SDEBUG(" dst: ");
  934. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  935. }
  936. static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
  937. {
  938. uint8_t attr = U8((*ptr)++);
  939. uint32_t dst, src, saved;
  940. int dptr = *ptr;
  941. SDEBUG(" dst: ");
  942. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  943. SDEBUG(" src: ");
  944. src = atom_get_src(ctx, attr, ptr);
  945. dst -= src;
  946. SDEBUG(" dst: ");
  947. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  948. }
/* SWITCH: compare src against each encoded case; on a match, jump to the
 * case's 16-bit target (relative to the table start) and stop scanning.
 * The bytecode pointer stepping below mirrors the on-disk case layout:
 * magic byte, immediate case value, 16-bit jump target. */
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;
	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			/* case value is an immediate with the same size bits
			 * (attr & 0x38) as the switch source */
			val =
			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
					 ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			/* no match: skip this case's jump target */
			(*ptr) += 2;
		} else {
			/* malformed table: bail out rather than loop forever */
			printk(KERN_INFO "Bad case.\n");
			return;
		}
	/* step over the ATOM_CASE_END marker */
	(*ptr) += 2;
}
  975. static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
  976. {
  977. uint8_t attr = U8((*ptr)++);
  978. uint32_t dst, src;
  979. SDEBUG(" src1: ");
  980. dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
  981. SDEBUG(" src2: ");
  982. src = atom_get_src(ctx, attr, ptr);
  983. ctx->ctx->cs_equal = ((dst & src) == 0);
  984. SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
  985. }
  986. static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
  987. {
  988. uint8_t attr = U8((*ptr)++);
  989. uint32_t dst, src, saved;
  990. int dptr = *ptr;
  991. SDEBUG(" dst: ");
  992. dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
  993. SDEBUG(" src: ");
  994. src = atom_get_src(ctx, attr, ptr);
  995. dst ^= src;
  996. SDEBUG(" dst: ");
  997. atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
  998. }
  999. static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
  1000. {
  1001. uint8_t val = U8((*ptr)++);
  1002. SDEBUG("DEBUG output: 0x%02X\n", val);
  1003. }
  1004. static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
  1005. {
  1006. uint16_t val = U16(*ptr);
  1007. (*ptr) += val + 2;
  1008. SDEBUG("PROCESSDS output: 0x%02X\n", val);
  1009. }
/* Opcode dispatch table: entry N handles ATOM opcode N, pairing a handler
 * with its addressing-mode / condition / unit argument.  Entry ordering is
 * load-bearing — the interpreter indexes this array directly by the opcode
 * byte, and only dispatches for 0 < op < ATOM_OP_CNT, so entry 0 is an
 * intentional NULL placeholder. */
static struct {
	void (*func) (atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{
	NULL, 0}, {
	atom_op_move, ATOM_ARG_REG}, {
	atom_op_move, ATOM_ARG_PS}, {
	atom_op_move, ATOM_ARG_WS}, {
	atom_op_move, ATOM_ARG_FB}, {
	atom_op_move, ATOM_ARG_PLL}, {
	atom_op_move, ATOM_ARG_MC}, {
	atom_op_and, ATOM_ARG_REG}, {
	atom_op_and, ATOM_ARG_PS}, {
	atom_op_and, ATOM_ARG_WS}, {
	atom_op_and, ATOM_ARG_FB}, {
	atom_op_and, ATOM_ARG_PLL}, {
	atom_op_and, ATOM_ARG_MC}, {
	atom_op_or, ATOM_ARG_REG}, {
	atom_op_or, ATOM_ARG_PS}, {
	atom_op_or, ATOM_ARG_WS}, {
	atom_op_or, ATOM_ARG_FB}, {
	atom_op_or, ATOM_ARG_PLL}, {
	atom_op_or, ATOM_ARG_MC}, {
	atom_op_shift_left, ATOM_ARG_REG}, {
	atom_op_shift_left, ATOM_ARG_PS}, {
	atom_op_shift_left, ATOM_ARG_WS}, {
	atom_op_shift_left, ATOM_ARG_FB}, {
	atom_op_shift_left, ATOM_ARG_PLL}, {
	atom_op_shift_left, ATOM_ARG_MC}, {
	atom_op_shift_right, ATOM_ARG_REG}, {
	atom_op_shift_right, ATOM_ARG_PS}, {
	atom_op_shift_right, ATOM_ARG_WS}, {
	atom_op_shift_right, ATOM_ARG_FB}, {
	atom_op_shift_right, ATOM_ARG_PLL}, {
	atom_op_shift_right, ATOM_ARG_MC}, {
	atom_op_mul, ATOM_ARG_REG}, {
	atom_op_mul, ATOM_ARG_PS}, {
	atom_op_mul, ATOM_ARG_WS}, {
	atom_op_mul, ATOM_ARG_FB}, {
	atom_op_mul, ATOM_ARG_PLL}, {
	atom_op_mul, ATOM_ARG_MC}, {
	atom_op_div, ATOM_ARG_REG}, {
	atom_op_div, ATOM_ARG_PS}, {
	atom_op_div, ATOM_ARG_WS}, {
	atom_op_div, ATOM_ARG_FB}, {
	atom_op_div, ATOM_ARG_PLL}, {
	atom_op_div, ATOM_ARG_MC}, {
	atom_op_add, ATOM_ARG_REG}, {
	atom_op_add, ATOM_ARG_PS}, {
	atom_op_add, ATOM_ARG_WS}, {
	atom_op_add, ATOM_ARG_FB}, {
	atom_op_add, ATOM_ARG_PLL}, {
	atom_op_add, ATOM_ARG_MC}, {
	atom_op_sub, ATOM_ARG_REG}, {
	atom_op_sub, ATOM_ARG_PS}, {
	atom_op_sub, ATOM_ARG_WS}, {
	atom_op_sub, ATOM_ARG_FB}, {
	atom_op_sub, ATOM_ARG_PLL}, {
	atom_op_sub, ATOM_ARG_MC}, {
	atom_op_setport, ATOM_PORT_ATI}, {
	atom_op_setport, ATOM_PORT_PCI}, {
	atom_op_setport, ATOM_PORT_SYSIO}, {
	atom_op_setregblock, 0}, {
	atom_op_setfbbase, 0}, {
	atom_op_compare, ATOM_ARG_REG}, {
	atom_op_compare, ATOM_ARG_PS}, {
	atom_op_compare, ATOM_ARG_WS}, {
	atom_op_compare, ATOM_ARG_FB}, {
	atom_op_compare, ATOM_ARG_PLL}, {
	atom_op_compare, ATOM_ARG_MC}, {
	atom_op_switch, 0}, {
	atom_op_jump, ATOM_COND_ALWAYS}, {
	atom_op_jump, ATOM_COND_EQUAL}, {
	atom_op_jump, ATOM_COND_BELOW}, {
	atom_op_jump, ATOM_COND_ABOVE}, {
	atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
	atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
	atom_op_jump, ATOM_COND_NOTEQUAL}, {
	atom_op_test, ATOM_ARG_REG}, {
	atom_op_test, ATOM_ARG_PS}, {
	atom_op_test, ATOM_ARG_WS}, {
	atom_op_test, ATOM_ARG_FB}, {
	atom_op_test, ATOM_ARG_PLL}, {
	atom_op_test, ATOM_ARG_MC}, {
	atom_op_delay, ATOM_UNIT_MILLISEC}, {
	atom_op_delay, ATOM_UNIT_MICROSEC}, {
	atom_op_calltable, 0}, {
	atom_op_repeat, 0}, {
	atom_op_clear, ATOM_ARG_REG}, {
	atom_op_clear, ATOM_ARG_PS}, {
	atom_op_clear, ATOM_ARG_WS}, {
	atom_op_clear, ATOM_ARG_FB}, {
	atom_op_clear, ATOM_ARG_PLL}, {
	atom_op_clear, ATOM_ARG_MC}, {
	atom_op_nop, 0}, {
	atom_op_eot, 0}, {
	atom_op_mask, ATOM_ARG_REG}, {
	atom_op_mask, ATOM_ARG_PS}, {
	atom_op_mask, ATOM_ARG_WS}, {
	atom_op_mask, ATOM_ARG_FB}, {
	atom_op_mask, ATOM_ARG_PLL}, {
	atom_op_mask, ATOM_ARG_MC}, {
	atom_op_postcard, 0}, {
	atom_op_beep, 0}, {
	atom_op_savereg, 0}, {
	atom_op_restorereg, 0}, {
	atom_op_setdatablock, 0}, {
	atom_op_xor, ATOM_ARG_REG}, {
	atom_op_xor, ATOM_ARG_PS}, {
	atom_op_xor, ATOM_ARG_WS}, {
	atom_op_xor, ATOM_ARG_FB}, {
	atom_op_xor, ATOM_ARG_PLL}, {
	atom_op_xor, ATOM_ARG_MC}, {
	atom_op_shl, ATOM_ARG_REG}, {
	atom_op_shl, ATOM_ARG_PS}, {
	atom_op_shl, ATOM_ARG_WS}, {
	atom_op_shl, ATOM_ARG_FB}, {
	atom_op_shl, ATOM_ARG_PLL}, {
	atom_op_shl, ATOM_ARG_MC}, {
	atom_op_shr, ATOM_ARG_REG}, {
	atom_op_shr, ATOM_ARG_PS}, {
	atom_op_shr, ATOM_ARG_WS}, {
	atom_op_shr, ATOM_ARG_FB}, {
	atom_op_shr, ATOM_ARG_PLL}, {
	atom_op_shr, ATOM_ARG_MC}, {
	atom_op_debug, 0}, {
	atom_op_processds, 0}, {
	atom_op_mul32, ATOM_ARG_PS}, {
	atom_op_mul32, ATOM_ARG_WS}, {
	atom_op_div32, ATOM_ARG_PS}, {
	atom_op_div32, ATOM_ARG_WS},
};
  1143. static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
  1144. {
  1145. int base = CU16(ctx->cmd_table + 4 + 2 * index);
  1146. int len, ws, ps, ptr;
  1147. unsigned char op;
  1148. atom_exec_context ectx;
  1149. int ret = 0;
  1150. if (!base)
  1151. return -EINVAL;
  1152. len = CU16(base + ATOM_CT_SIZE_PTR);
  1153. ws = CU8(base + ATOM_CT_WS_PTR);
  1154. ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
  1155. ptr = base + ATOM_CT_CODE_PTR;
  1156. SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
  1157. ectx.ctx = ctx;
  1158. ectx.ps_shift = ps / 4;
  1159. ectx.start = base;
  1160. ectx.ps = params;
  1161. ectx.abort = false;
  1162. ectx.last_jump = 0;
  1163. if (ws)
  1164. ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
  1165. else
  1166. ectx.ws = NULL;
  1167. debug_depth++;
  1168. while (1) {
  1169. op = CU8(ptr++);
  1170. if (op < ATOM_OP_NAMES_CNT)
  1171. SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
  1172. else
  1173. SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
  1174. if (ectx.abort) {
  1175. DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
  1176. base, len, ws, ps, ptr - 1);
  1177. ret = -EINVAL;
  1178. goto free;
  1179. }
  1180. if (op < ATOM_OP_CNT && op > 0)
  1181. opcode_table[op].func(&ectx, &ptr,
  1182. opcode_table[op].arg);
  1183. else
  1184. break;
  1185. if (op == ATOM_OP_EOT)
  1186. break;
  1187. }
  1188. debug_depth--;
  1189. SDEBUG("<<\n");
  1190. free:
  1191. if (ws)
  1192. kfree(ectx.ws);
  1193. return ret;
  1194. }
  1195. int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
  1196. {
  1197. int r;
  1198. mutex_lock(&ctx->mutex);
  1199. /* reset data block */
  1200. ctx->data_block = 0;
  1201. /* reset reg block */
  1202. ctx->reg_block = 0;
  1203. /* reset fb window */
  1204. ctx->fb_base = 0;
  1205. /* reset io mode */
  1206. ctx->io_mode = ATOM_IO_MM;
  1207. /* reset divmul */
  1208. ctx->divmul[0] = 0;
  1209. ctx->divmul[1] = 0;
  1210. r = amdgpu_atom_execute_table_locked(ctx, index, params);
  1211. mutex_unlock(&ctx->mutex);
  1212. return r;
  1213. }
/* Byte length of each indirect-IO (IIO) instruction, indexed by opcode. */
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
  1215. static void atom_index_iio(struct atom_context *ctx, int base)
  1216. {
  1217. ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
  1218. if (!ctx->iio)
  1219. return;
  1220. while (CU8(base) == ATOM_IIO_START) {
  1221. ctx->iio[CU8(base + 1)] = base + 2;
  1222. base += 2;
  1223. while (CU8(base) != ATOM_IIO_END)
  1224. base += atom_iio_len[CU8(base)];
  1225. base += 3;
  1226. }
  1227. }
  1228. struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
  1229. {
  1230. int base;
  1231. struct atom_context *ctx =
  1232. kzalloc(sizeof(struct atom_context), GFP_KERNEL);
  1233. char *str;
  1234. char name[512];
  1235. int i;
  1236. if (!ctx)
  1237. return NULL;
  1238. ctx->card = card;
  1239. ctx->bios = bios;
  1240. if (CU16(0) != ATOM_BIOS_MAGIC) {
  1241. printk(KERN_INFO "Invalid BIOS magic.\n");
  1242. kfree(ctx);
  1243. return NULL;
  1244. }
  1245. if (strncmp
  1246. (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
  1247. strlen(ATOM_ATI_MAGIC))) {
  1248. printk(KERN_INFO "Invalid ATI magic.\n");
  1249. kfree(ctx);
  1250. return NULL;
  1251. }
  1252. base = CU16(ATOM_ROM_TABLE_PTR);
  1253. if (strncmp
  1254. (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
  1255. strlen(ATOM_ROM_MAGIC))) {
  1256. printk(KERN_INFO "Invalid ATOM magic.\n");
  1257. kfree(ctx);
  1258. return NULL;
  1259. }
  1260. ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
  1261. ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
  1262. atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
  1263. if (!ctx->iio) {
  1264. amdgpu_atom_destroy(ctx);
  1265. return NULL;
  1266. }
  1267. str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
  1268. while (*str && ((*str == '\n') || (*str == '\r')))
  1269. str++;
  1270. /* name string isn't always 0 terminated */
  1271. for (i = 0; i < 511; i++) {
  1272. name[i] = str[i];
  1273. if (name[i] < '.' || name[i] > 'z') {
  1274. name[i] = 0;
  1275. break;
  1276. }
  1277. }
  1278. printk(KERN_INFO "ATOM BIOS: %s\n", name);
  1279. return ctx;
  1280. }
  1281. int amdgpu_atom_asic_init(struct atom_context *ctx)
  1282. {
  1283. int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
  1284. uint32_t ps[16];
  1285. int ret;
  1286. memset(ps, 0, 64);
  1287. ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
  1288. ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
  1289. if (!ps[0] || !ps[1])
  1290. return 1;
  1291. if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
  1292. return 1;
  1293. ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
  1294. if (ret)
  1295. return ret;
  1296. memset(ps, 0, 64);
  1297. return ret;
  1298. }
  1299. void amdgpu_atom_destroy(struct atom_context *ctx)
  1300. {
  1301. kfree(ctx->iio);
  1302. kfree(ctx);
  1303. }
  1304. bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
  1305. uint16_t * size, uint8_t * frev, uint8_t * crev,
  1306. uint16_t * data_start)
  1307. {
  1308. int offset = index * 2 + 4;
  1309. int idx = CU16(ctx->data_table + offset);
  1310. u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
  1311. if (!mdt[index])
  1312. return false;
  1313. if (size)
  1314. *size = CU16(idx);
  1315. if (frev)
  1316. *frev = CU8(idx + 2);
  1317. if (crev)
  1318. *crev = CU8(idx + 3);
  1319. *data_start = idx;
  1320. return true;
  1321. }
  1322. bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
  1323. uint8_t * crev)
  1324. {
  1325. int offset = index * 2 + 4;
  1326. int idx = CU16(ctx->cmd_table + offset);
  1327. u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
  1328. if (!mct[index])
  1329. return false;
  1330. if (frev)
  1331. *frev = CU8(idx + 2);
  1332. if (crev)
  1333. *crev = CU8(idx + 3);
  1334. return true;
  1335. }
  1336. int amdgpu_atom_allocate_fb_scratch(struct atom_context *ctx)
  1337. {
  1338. int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
  1339. uint16_t data_offset;
  1340. int usage_bytes = 0;
  1341. struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
  1342. if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
  1343. firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
  1344. DRM_DEBUG("atom firmware requested %08x %dkb\n",
  1345. le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
  1346. le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
  1347. usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
  1348. }
  1349. ctx->scratch_size_bytes = 0;
  1350. if (usage_bytes == 0)
  1351. usage_bytes = 20 * 1024;
  1352. /* allocate some scratch memory */
  1353. ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
  1354. if (!ctx->scratch)
  1355. return -ENOMEM;
  1356. ctx->scratch_size_bytes = usage_bytes;
  1357. return 0;
  1358. }