/* kfd_device_queue_manager_vi.c — VI (Volcanic Islands) ASIC-specific
 * device queue manager callbacks for the amdkfd driver.
 */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "kfd_device_queue_manager.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
/* NOTE(review): duplicate include of gfx_8_0_enum.h below — harmless with
 * include guards, but could be dropped.
 */
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"

/* Forward declarations for the VI asic_ops callbacks installed by
 * device_queue_manager_init_vi() below.
 */
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd,
		enum cache_policy default_policy,
		enum cache_policy alternate_policy,
		void __user *alternate_aperture_base,
		uint64_t alternate_aperture_size);
static int register_process_vi(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd);
static int initialize_cpsch_vi(struct device_queue_manager *dqm);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
		struct qcm_process_device *qpd);
  39. void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
  40. {
  41. ops->set_cache_memory_policy = set_cache_memory_policy_vi;
  42. ops->register_process = register_process_vi;
  43. ops->initialize = initialize_cpsch_vi;
  44. ops->init_sdma_vm = init_sdma_vm;
  45. }
  46. static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
  47. {
  48. /* In 64-bit mode, we can only control the top 3 bits of the LDS,
  49. * scratch and GPUVM apertures.
  50. * The hardware fills in the remaining 59 bits according to the
  51. * following pattern:
  52. * LDS: X0000000'00000000 - X0000001'00000000 (4GB)
  53. * Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
  54. * GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
  55. *
  56. * (where X/Y is the configurable nybble with the low-bit 0)
  57. *
  58. * LDS and scratch will have the same top nybble programmed in the
  59. * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
  60. * GPUVM can have a different top nybble programmed in the
  61. * top 3 bits of SH_MEM_BASES.SHARED_BASE.
  62. * We don't bother to support different top nybbles
  63. * for LDS/Scratch and GPUVM.
  64. */
  65. BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
  66. top_address_nybble == 0);
  67. return top_address_nybble << 12 |
  68. (top_address_nybble << 12) <<
  69. SH_MEM_BASES__SHARED_BASE__SHIFT;
  70. }
  71. static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
  72. struct qcm_process_device *qpd,
  73. enum cache_policy default_policy,
  74. enum cache_policy alternate_policy,
  75. void __user *alternate_aperture_base,
  76. uint64_t alternate_aperture_size)
  77. {
  78. uint32_t default_mtype;
  79. uint32_t ape1_mtype;
  80. default_mtype = (default_policy == cache_policy_coherent) ?
  81. MTYPE_CC :
  82. MTYPE_NC;
  83. ape1_mtype = (alternate_policy == cache_policy_coherent) ?
  84. MTYPE_CC :
  85. MTYPE_NC;
  86. qpd->sh_mem_config = (qpd->sh_mem_config &
  87. SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
  88. SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
  89. SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
  90. default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
  91. ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
  92. SH_MEM_CONFIG__PRIVATE_ATC_MASK;
  93. return true;
  94. }
  95. static int register_process_vi(struct device_queue_manager *dqm,
  96. struct qcm_process_device *qpd)
  97. {
  98. struct kfd_process_device *pdd;
  99. unsigned int temp;
  100. BUG_ON(!dqm || !qpd);
  101. pdd = qpd_to_pdd(qpd);
  102. /* check if sh_mem_config register already configured */
  103. if (qpd->sh_mem_config == 0) {
  104. qpd->sh_mem_config =
  105. SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
  106. SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
  107. MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
  108. MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
  109. SH_MEM_CONFIG__PRIVATE_ATC_MASK;
  110. qpd->sh_mem_ape1_limit = 0;
  111. qpd->sh_mem_ape1_base = 0;
  112. }
  113. if (qpd->pqm->process->is_32bit_user_mode) {
  114. temp = get_sh_mem_bases_32(pdd);
  115. qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
  116. qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
  117. SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
  118. } else {
  119. temp = get_sh_mem_bases_nybble_64(pdd);
  120. qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
  121. qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
  122. SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
  123. }
  124. pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
  125. qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
  126. return 0;
  127. }
  128. static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
  129. struct qcm_process_device *qpd)
  130. {
  131. uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
  132. if (q->process->is_32bit_user_mode)
  133. value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
  134. get_sh_mem_bases_32(qpd_to_pdd(qpd));
  135. else
  136. value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
  137. SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
  138. SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
  139. q->properties.sdma_vm_addr = value;
  140. }
/*
 * initialize_cpsch_vi - Per-ASIC initialization hook for the CP scheduler.
 * VI requires no ASIC-specific setup, so this is a no-op that reports
 * success.
 */
static int initialize_cpsch_vi(struct device_queue_manager *dqm)
{
	return 0;
}