grukdump.c

/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>

#include <linux/nospec.h>

#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10
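
/*
 * Copy a single GRU handle (GRU_HANDLE_BYTES) to user space and advance
 * the destination pointer past it. Returns 0 on success, -1 if the
 * copy_to_user() fails.
 */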
static int gru_user_copy_handle(void __user **dp, void *s)
{
        if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
                return -1;
        *dp += GRU_HANDLE_BYTES;
        return 0;
}
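
/*
 * Dump the data portion of one context: for every CBR allocated to the
 * context, copy the CB and the matching TFH and CBE handles to the user
 * buffer, then copy the data segment (DSRs) if dsrcnt is non-zero.
 */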
static int gru_dump_context_data(void *grubase,
                        struct gru_context_configuration_handle *cch,
                        void __user *ubuf, int ctxnum, int dsrcnt,
                        int flush_cbrs)
{
        void *cb, *cbe, *tfh, *gseg;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        tfh = grubase + GRU_TFH_BASE;

        for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
                if (flush_cbrs)
                        gru_flush_cache(cb);
                if (gru_user_copy_handle(&ubuf, cb))
                        goto fail;
                if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
                        goto fail;
                if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
                        goto fail;
                cb += GRU_HANDLE_STRIDE;
        }
        if (dsrcnt)
                memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
        return 0;

fail:
        return -EFAULT;
}
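
/*
 * Dump all TLB fault map (TFM) handles of the chiplet. Returns the number
 * of bytes written, -EFBIG if the user buffer is too small, or -EFAULT if
 * a copy to user space fails.
 */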
static int gru_dump_tfm(struct gru_state *gru,
                void __user *ubuf, void __user *ubufend)
{
        struct gru_tlb_fault_map *tfm;
        int i;

        if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
                return -EFBIG;

        for (i = 0; i < GRU_NUM_TFM; i++) {
                tfm = get_tfm(gru->gs_gru_base_vaddr, i);
                if (gru_user_copy_handle(&ubuf, tfm))
                        goto fail;
        }
        return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
        return -EFAULT;
}
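
/*
 * Dump all TLB global handles (TGH) of the chiplet. Same return
 * convention as gru_dump_tfm().
 */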
static int gru_dump_tgh(struct gru_state *gru,
                void __user *ubuf, void __user *ubufend)
{
        struct gru_tlb_global_handle *tgh;
        int i;

        if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
                return -EFBIG;

        for (i = 0; i < GRU_NUM_TGH; i++) {
                tgh = get_tgh(gru->gs_gru_base_vaddr, i);
                if (gru_user_copy_handle(&ubuf, tgh))
                        goto fail;
        }
        return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
        return -EFAULT;
}
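
/*
 * Dump one context: write a gru_dump_context_header, the CCH image and,
 * for an active context, its CBR/TFH/CBE handles and (optionally) its
 * data segment. The CCH is locked if possible (up to CCH_LOCK_ATTEMPTS
 * tries); context data is dumped only if the lock was obtained or the
 * caller did not insist on locking (lock_cch == 0). Returns the number
 * of bytes written or a negative errno.
 */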
static int gru_dump_context(struct gru_state *gru, int ctxnum,
                void __user *ubuf, void __user *ubufend, char data_opt,
                char lock_cch, char flush_cbrs)
{
        struct gru_dump_context_header hdr;
        struct gru_dump_context_header __user *uhdr = ubuf;
        struct gru_context_configuration_handle *cch, *ubufcch;
        struct gru_thread_state *gts;
        int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
        void *grubase;

        memset(&hdr, 0, sizeof(hdr));
        grubase = gru->gs_gru_base_vaddr;
        cch = get_cch(grubase, ctxnum);
        for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
                cch_locked = trylock_cch_handle(cch);
                if (cch_locked)
                        break;
                msleep(1);
        }

        ubuf += sizeof(hdr);
        ubufcch = ubuf;
        if (gru_user_copy_handle(&ubuf, cch)) {
                if (cch_locked)
                        unlock_cch_handle(cch);
                return -EFAULT;
        }

        if (cch_locked)
                ubufcch->delresp = 0;
        bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

        if (cch_locked || !lock_cch) {
                gts = gru->gs_gts[ctxnum];
                if (gts && gts->ts_vma) {
                        hdr.pid = gts->ts_tgid_owner;
                        hdr.vaddr = gts->ts_vma->vm_start;
                }
                if (cch->state != CCHSTATE_INACTIVE) {
                        cbrcnt = hweight64(cch->cbr_allocation_map) *
                                                GRU_CBR_AU_SIZE;
                        dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
                                                GRU_DSR_AU_CL : 0;
                }
                bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
                if (bytes > ubufend - ubuf)
                        ret = -EFBIG;
                else
                        ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
                                                    dsrcnt, flush_cbrs);
        }
        if (cch_locked)
                unlock_cch_handle(cch);
        if (ret)
                return ret;

        hdr.magic = GRU_DUMP_MAGIC;
        hdr.gid = gru->gs_gid;
        hdr.ctxnum = ctxnum;
        hdr.cbrcnt = cbrcnt;
        hdr.dsrcnt = dsrcnt;
        hdr.cch_locked = cch_locked;
        if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
                return -EFAULT;

        return bytes;
}
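
/*
 * Handle a "dump chiplet state" request from user space. Copies in the
 * request, validates the gid (with a Spectre-v1 index barrier), then
 * dumps the chiplet's TFM and TGH handles followed by either a single
 * context or, if req.ctxnum is negative, every context on the chiplet.
 * Returns the number of contexts dumped or a negative errno.
 */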
int gru_dump_chiplet_request(unsigned long arg)
{
        struct gru_state *gru;
        struct gru_dump_chiplet_state_req req;
        void __user *ubuf;
        void __user *ubufend;
        int ctxnum, ret, cnt = 0;

        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        /* Currently, only dump by gid is implemented */
        if (req.gid >= gru_max_gids)
                return -EINVAL;
        req.gid = array_index_nospec(req.gid, gru_max_gids);

        gru = GID_TO_GRU(req.gid);
        ubuf = req.buf;
        ubufend = req.buf + req.buflen;

        ret = gru_dump_tfm(gru, ubuf, ubufend);
        if (ret < 0)
                goto fail;
        ubuf += ret;

        ret = gru_dump_tgh(gru, ubuf, ubufend);
        if (ret < 0)
                goto fail;
        ubuf += ret;

        for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
                if (req.ctxnum == ctxnum || req.ctxnum < 0) {
                        ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
                                                req.data_opt, req.lock_cch,
                                                req.flush_cbrs);
                        if (ret < 0)
                                goto fail;
                        ubuf += ret;
                        cnt++;
                }
        }

        if (copy_to_user((void __user *)arg, &req, sizeof(req)))
                return -EFAULT;
        return cnt;

fail:
        return ret;
}
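
For reference, a minimal user-space sketch of how this dump request might be issued. None of this comes from grukdump.c itself: the device node (/dev/gru), the ioctl request name (GRU_DUMP_CHIPLET_STATE) and the exact layout of struct gru_dump_chiplet_state_req live in grulib.h and the GRU character-device code, so treat the names below as assumptions to be checked against those headers. Only the fields actually read by gru_dump_chiplet_request() above are filled in.

/*
 * Hypothetical user-space caller. Assumes grulib.h declares the request
 * structure and an ioctl code named GRU_DUMP_CHIPLET_STATE, and that the
 * driver registers /dev/gru; verify both against the installed headers.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "grulib.h"                     /* assumed location of the req struct */

int dump_chiplet(int gid, void *buf, size_t buflen)
{
        struct gru_dump_chiplet_state_req req;
        int fd, ret;

        fd = open("/dev/gru", O_RDWR);  /* assumed device node */
        if (fd < 0)
                return -1;

        memset(&req, 0, sizeof(req));
        req.gid = gid;                  /* chiplet to dump */
        req.ctxnum = -1;                /* < 0: dump every context */
        req.data_opt = 1;               /* include the data segment (DSRs) */
        req.lock_cch = 0;               /* dump even if the CCH can't be locked */
        req.flush_cbrs = 0;
        req.buf = buf;
        req.buflen = buflen;

        /* Returns the number of contexts dumped, or -1 with errno set. */
        ret = ioctl(fd, GRU_DUMP_CHIPLET_STATE, &req);
        close(fd);
        return ret;
}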