/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */
#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>
	.text
	.align	32
sun4v_cpu_mondo:
	/* CPU mondo (cross-call) interrupt handler.
	 *
	 * Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  The scratchpad
	 * register points at this CPU's fault-info area, which sits
	 * TRAP_PER_CPU_FAULT_INFO bytes into its trap_block entry.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get smp_processor_id() into %g3:
	 * (this_cpu_trap_block - trap_block) >> TRAP_BLOCK_SZ_SHIFT
	 */
	sethi	%hi(trap_block), %g5
	or	%g5, %lo(trap_block), %g5
	sub	%g4, %g5, %g3
	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3

	/* Increment cpu_mondo_counter[smp_processor_id()]
	 * (array of 64-bit counters, hence the shift by 3).
	 */
	sethi	%hi(cpu_mondo_counter), %g5
	or	%g5, %lo(cpu_mondo_counter), %g5
	sllx	%g3, 3, %g3
	add	%g5, %g3, %g5
	ldx	[%g5], %g3
	add	%g3, 1, %g3
	stx	%g3, [%g5]

	/* Get CPU mondo queue base phys address into %g7. */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	/* Skip the remainder of the 64-byte queue entry
	 * (0x40 total; 0x10 already consumed by the adds above).
	 */
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer, wrapping via the queue-size mask. */
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2
	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Tail-jump to the cross-call handler PC extracted above. */
	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry
sun4v_dev_mondo:
	/* Device mondo (I/O interrupt) handler.
	 *
	 * Head offset in %g2, tail offset in %g4; equal means empty.
	 */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5. */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3, then advance past the 64-byte entry. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Currently we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer, this frees up some registers. */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2
	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* %g1 = __pa of this CPU's irq work list head (%g4 is scratch). */
	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)

	/* For VIRQs, cookie is encoded as ~bucket_phys_addr, so a
	 * negative IVEC already carries the bucket PA (delay slot
	 * un-inverts it into %g4).
	 */
	brlz,pt %g3, 1f
	 xnor	%g3, %g0, %g4

	/* Get __pa(&ivector_table[IVEC]) into %g4.  Table entries
	 * are 16 bytes, hence the shift by 4.
	 */
	sethi	%hi(ivector_table_pa), %g4
	ldx	[%g4 + %lo(ivector_table_pa)], %g4
	sllx	%g3, 4, %g3
	add	%g4, %g3, %g4

	/* Push the bucket (%g4) onto the per-cpu work list: store the
	 * old list head into the bucket's first word (phys access),
	 * then make the bucket the new head.
	 */
1:	ldx	[%g1], %g2
	stxa	%g2, [%g4] ASI_PHYS_USE_EC
	stx	%g4, [%g1]

	/* Signal the interrupt by setting (1 << pil) in %softint. */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry
sun4v_res_mondo:
	/* Resumable error mondo handler: copy the 64-byte error report
	 * into a per-cpu kernel buffer, then call C code to log it.
	 *
	 * Head offset in %g2, tail offset in %g4; equal means empty.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word of the kbuf slot is non-zero, the kernel
	 * buffer is full: C code has not consumed the previous entry yet.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1; it is handed to the
	 * C handler (via %g4 -> %l4 across etrap) below.
	 */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer,
	 * eight 8-byte words, unrolled.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer, wrapping via the qmask in %g4. */
	and	%g2, %g4, %g2
	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event: sun4v_resum_error(regs, entry_offset). */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop
sun4v_nonres_mondo:
	/* Non-resumable error mondo handler; mirrors sun4v_res_mondo
	 * but uses the NONRESUM queue/buffer and C handlers.
	 *
	 * Head offset in %g2, tail offset in %g4; equal means empty.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRES mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word of the kbuf slot is non-zero, the kernel
	 * buffer is full: C code has not consumed the previous entry yet.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1; it is handed to the
	 * C handler (via %g4 -> %l4 across etrap) below.
	 */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer,
	 * eight 8-byte words, unrolled.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer, wrapping via the qmask in %g4. */
	and	%g2, %g4, %g2
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event: sun4v_nonresum_error(regs, entry_offset). */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop