kfd_flat_memory.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355
  1. /*
  2. * Copyright 2014 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include <linux/device.h>
  24. #include <linux/export.h>
  25. #include <linux/err.h>
  26. #include <linux/fs.h>
  27. #include <linux/sched.h>
  28. #include <linux/slab.h>
  29. #include <linux/uaccess.h>
  30. #include <linux/compat.h>
  31. #include <uapi/linux/kfd_ioctl.h>
  32. #include <linux/time.h>
  33. #include "kfd_priv.h"
  34. #include <linux/mm.h>
  35. #include <linux/mman.h>
  36. #include <asm/processor.h>
  37. /*
  38. * The primary memory I/O features being added for revisions of gfxip
  39. * beyond 7.0 (Kaveri) are:
  40. *
  41. * Access to ATC/IOMMU mapped memory w/ associated extension of VA to 48b
  42. *
  43. * “Flat” shader memory access – These are new shader vector memory
  44. * operations that do not reference a T#/V# so a “pointer” is what is
  45. * sourced from the vector gprs for direct access to memory.
  46. * This pointer space has the Shared(LDS) and Private(Scratch) memory
  47. * mapped into this pointer space as apertures.
  48. * The hardware then determines how to direct the memory request
  49. * based on what apertures the request falls in.
  50. *
  51. * Unaligned support and alignment check
  52. *
  53. *
  54. * System Unified Address - SUA
  55. *
  56. * The standard usage for GPU virtual addresses are that they are mapped by
  57. * a set of page tables we call GPUVM and these page tables are managed by
  58. * a combination of vidMM/driver software components. The current virtual
  59. * address (VA) range for GPUVM is 40b.
  60. *
  61. * As of gfxip7.1 and beyond we’re adding the ability for compute memory
  62. * clients (CP/RLC, DMA, SHADER(ifetch, scalar, and vector ops)) to access
  63. * the same page tables used by host x86 processors and that are managed by
  64. * the operating system. This is via a technique and hardware called ATC/IOMMU.
  65. * The GPU has the capability of accessing both the GPUVM and ATC address
  66. * spaces for a given VMID (process) simultaneously and we call this feature
  67. * system unified address (SUA).
  68. *
  69. * There are three fundamental address modes of operation for a given VMID
  70. * (process) on the GPU:
  71. *
  72. * HSA64 – 64b pointers and the default address space is ATC
  73. * HSA32 – 32b pointers and the default address space is ATC
  74. * GPUVM – 64b pointers and the default address space is GPUVM (driver
  75. * model mode)
  76. *
  77. *
  78. * HSA64 - ATC/IOMMU 64b
  79. *
  80. * A 64b pointer in the AMD64/IA64 CPU architecture is not fully utilized
  81. * by the CPU so an AMD CPU can only access the high area
  82. * (VA[63:47] == 0x1FFFF) and low area (VA[63:47] == 0) of the address space
  83. * so the actual VA carried to translation is 48b. There is a “hole” in
  84. * the middle of the 64b VA space.
  85. *
  86. * The GPU not only has access to all of the CPU accessible address space via
  87. * ATC/IOMMU, but it also has access to the GPUVM address space. The “system
  88. * unified address” feature (SUA) is the mapping of GPUVM and ATC address
  89. * spaces into a unified pointer space. The method we take for 64b mode is
  90. * to map the full 40b GPUVM address space into the hole of the 64b address
  91. * space.
  92. * The GPUVM_Base/GPUVM_Limit defines the aperture in the 64b space where we
  93. * direct requests to be translated via GPUVM page tables instead of the
  94. * IOMMU path.
  95. *
  96. *
  97. * 64b to 49b Address conversion
  98. *
  99. * Note that there are still significant portions of unused regions (holes)
  100. * in the 64b address space even for the GPU. There are several places in
  101. * the pipeline (sw and hw), we wish to compress the 64b virtual address
  102. * to a 49b address. This 49b address is constituted of an “ATC” bit
  103. * plus a 48b virtual address. This 49b address is what is passed to the
  104. * translation hardware. ATC==0 means the 48b address is a GPUVM address
  105. * (max of 2^40 – 1) intended to be translated via GPUVM page tables.
  106. * ATC==1 means the 48b address is intended to be translated via IOMMU
  107. * page tables.
  108. *
  109. * A 64b pointer is compared to the apertures that are defined (Base/Limit), in
  110. * this case the GPUVM aperture (red) is defined and if a pointer falls in this
  111. * aperture, we subtract the GPUVM_Base address and set the ATC bit to zero
  112. * as part of the 64b to 49b conversion.
  113. *
  114. * Where this 64b to 49b conversion is done is a function of the usage.
  115. * Most GPU memory access is via memory objects where the driver builds
  116. * a descriptor which consists of a base address and a memory access by
  117. * the GPU usually consists of some kind of an offset or Cartesian coordinate
  118. * that references this memory descriptor. This is the case for shader
  119. * instructions that reference the T# or V# constants, or for specified
  120. * locations of assets (ex. the shader program location). In these cases
  121. * the driver is what handles the 64b to 49b conversion and the base
  122. * address in the descriptor (ex. V# or T# or shader program location)
  123. * is defined as a 48b address w/ an ATC bit. For this usage a given
  124. * memory object cannot straddle multiple apertures in the 64b address
  125. * space. For example a shader program cannot jump in/out between ATC
  126. * and GPUVM space.
  127. *
  128. * In some cases we wish to pass a 64b pointer to the GPU hardware and
  129. * the GPU hw does the 64b to 49b conversion before passing memory
  130. * requests to the cache/memory system. This is the case for the
  131. * S_LOAD and FLAT_* shader memory instructions where we have 64b pointers
  132. * in scalar and vector GPRs respectively.
  133. *
  134. * In all cases (no matter where the 64b -> 49b conversion is done), the gfxip
  135. * hardware sends a 48b address along w/ an ATC bit, to the memory controller
  136. * on the memory request interfaces.
  137. *
  138. * <client>_MC_rdreq_atc // read request ATC bit
  139. *
  140. * 0 : <client>_MC_rdreq_addr is a GPUVM VA
  141. *
  142. * 1 : <client>_MC_rdreq_addr is a ATC VA
  143. *
  144. *
  145. * “Spare” aperture (APE1)
  146. *
  147. * We use the GPUVM aperture to differentiate ATC vs. GPUVM, but we also use
  148. * apertures to set the Mtype field for S_LOAD/FLAT_* ops which is input to the
  149. * config tables for setting cache policies. The “spare” (APE1) aperture is
  150. * motivated by getting a different Mtype from the default.
  151. * The default aperture isn’t an actual base/limit aperture; it is just the
  152. * address space that doesn’t hit any defined base/limit apertures.
  153. * The following diagram is a complete picture of the gfxip7.x SUA apertures.
  154. * The APE1 can be placed either below or above
  155. * the hole (cannot be in the hole).
  156. *
  157. *
  158. * General Aperture definitions and rules
  159. *
  160. * An aperture register definition consists of a Base, Limit, Mtype, and
  161. * usually an ATC bit indicating which translation tables that aperture uses.
  162. * In all cases (for SUA and DUA apertures discussed later), aperture base
  163. * and limit definitions are 64KB aligned.
  164. *
  165. * <ape>_Base[63:0] = { <ape>_Base_register[63:16], 0x0000 }
  166. *
  167. * <ape>_Limit[63:0] = { <ape>_Limit_register[63:16], 0xFFFF }
  168. *
  169. * The base and limit are considered inclusive to an aperture so being
  170. * inside an aperture means (address >= Base) AND (address <= Limit).
  171. *
  172. * In no case is a payload that straddles multiple apertures expected to work.
  173. * For example a load_dword_x4 that starts in one aperture and ends in another,
  174. * does not work. For the vector FLAT_* ops we have detection capability in
  175. * the shader for reporting a “memory violation” back to the
  176. * SQ block for use in traps.
  177. * A memory violation results when an op falls into the hole,
  178. * or a payload straddles multiple apertures. The S_LOAD instruction
  179. * does not have this detection.
  180. *
  181. * Apertures cannot overlap.
  182. *
  183. *
  184. *
  185. * HSA32 - ATC/IOMMU 32b
  186. *
  187. * For HSA32 mode, the pointers are interpreted as 32 bits and use a single GPR
  188. * instead of two for the S_LOAD and FLAT_* ops. The entire GPUVM space of 40b
  189. * will not fit so there is only partial visibility to the GPUVM
  190. * space (defined by the aperture) for S_LOAD and FLAT_* ops.
  191. * There is no spare (APE1) aperture for HSA32 mode.
  192. *
  193. *
  194. * GPUVM 64b mode (driver model)
  195. *
  196. * This mode is related to HSA64 in that the difference really is that
  197. * the default aperture is GPUVM (ATC==0) and not ATC space.
  198. * We have gfxip7.x hardware that has FLAT_* and S_LOAD support for
  199. * SUA GPUVM mode, but does not support HSA32/HSA64.
  200. *
  201. *
  202. * Device Unified Address - DUA
  203. *
  204. * Device unified address (DUA) is the name of the feature that maps the
  205. * Shared(LDS) memory and Private(Scratch) memory into the overall address
  206. * space for use by the new FLAT_* vector memory ops. The Shared and
  207. * Private memories are mapped as apertures into the address space,
  208. * and the hardware detects when a FLAT_* memory request is to be redirected
  209. * to the LDS or Scratch memory when it falls into one of these apertures.
  210. * Like the SUA apertures, the Shared/Private apertures are 64KB aligned and
  211. * the base/limit is “in” the aperture. For both HSA64 and GPUVM SUA modes,
  212. * the Shared/Private apertures are always placed in a limited selection of
  213. * options in the hole of the 64b address space. For HSA32 mode, the
  214. * Shared/Private apertures can be placed anywhere in the 32b space
  215. * except at 0.
  216. *
  217. *
  218. * HSA64 Apertures for FLAT_* vector ops
  219. *
  220. * For HSA64 SUA mode, the Shared and Private apertures are always placed
  221. * in the hole w/ a limited selection of possible locations. The requests
  222. * that fall in the private aperture are expanded as a function of the
  223. * work-item id (tid) and redirected to the location of the
  224. * “hidden private memory”. The hidden private can be placed in either GPUVM
  225. * or ATC space. The addresses that fall in the shared aperture are
  226. * re-directed to the on-chip LDS memory hardware.
  227. *
  228. *
  229. * HSA32 Apertures for FLAT_* vector ops
  230. *
  231. * In HSA32 mode, the Private and Shared apertures can be placed anywhere
  232. * in the 32b space except at 0 (Private or Shared Base at zero disables
  233. * the apertures). If the base address of the apertures are non-zero
  234. * (ie apertures exists), the size is always 64KB.
  235. *
  236. *
  237. * GPUVM Apertures for FLAT_* vector ops
  238. *
  239. * In GPUVM mode, the Shared/Private apertures are specified identically
  240. * to HSA64 mode where they are always in the hole at a limited selection
  241. * of locations.
  242. *
  243. *
  244. * Aperture Definitions for SUA and DUA
  245. *
  246. * The interpretation of the aperture register definitions for a given
  247. * VMID is a function of the “SUA Mode” which is one of HSA64, HSA32, or
  248. * GPUVM64 discussed in previous sections. The mode is first decoded, and
  249. * then the remaining register decode is a function of the mode.
  250. *
  251. *
  252. * SUA Mode Decode
  253. *
  254. * For the S_LOAD and FLAT_* shader operations, the SUA mode is decoded from
  255. * the COMPUTE_DISPATCH_INITIATOR:DATA_ATC bit and
  256. * the SH_MEM_CONFIG:PTR32 bits.
  257. *
  258. * COMPUTE_DISPATCH_INITIATOR:DATA_ATC SH_MEM_CONFIG:PTR32 Mode
  259. *
  260. * 1 0 HSA64
  261. *
  262. * 1 1 HSA32
  263. *
  264. * 0 X GPUVM64
  265. *
  266. * In general the hardware will ignore the PTR32 bit and treat
  267. * as “0” whenever DATA_ATC = “0”, but sw should set PTR32=0
  268. * when DATA_ATC=0.
  269. *
  270. * The DATA_ATC bit is only set for compute dispatches.
  271. * All “Draw” dispatches are hardcoded to GPUVM64 mode
  272. * for FLAT_* / S_LOAD operations.
  273. */
  274. #define MAKE_GPUVM_APP_BASE(gpu_num) \
  275. (((uint64_t)(gpu_num) << 61) + 0x1000000000000L)
  276. #define MAKE_GPUVM_APP_LIMIT(base) \
  277. (((uint64_t)(base) & \
  278. 0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL)
  279. #define MAKE_SCRATCH_APP_BASE(gpu_num) \
  280. (((uint64_t)(gpu_num) << 61) + 0x100000000L)
  281. #define MAKE_SCRATCH_APP_LIMIT(base) \
  282. (((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
  283. #define MAKE_LDS_APP_BASE(gpu_num) \
  284. (((uint64_t)(gpu_num) << 61) + 0x0)
  285. #define MAKE_LDS_APP_LIMIT(base) \
  286. (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
  287. int kfd_init_apertures(struct kfd_process *process)
  288. {
  289. uint8_t id = 0;
  290. struct kfd_dev *dev;
  291. struct kfd_process_device *pdd;
  292. /*Iterating over all devices*/
  293. while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
  294. id < NUM_OF_SUPPORTED_GPUS) {
  295. pdd = kfd_create_process_device_data(dev, process);
  296. if (pdd == NULL) {
  297. pr_err("Failed to create process device data\n");
  298. return -1;
  299. }
  300. /*
  301. * For 64 bit process aperture will be statically reserved in
  302. * the x86_64 non canonical process address space
  303. * amdkfd doesn't currently support apertures for 32 bit process
  304. */
  305. if (process->is_32bit_user_mode) {
  306. pdd->lds_base = pdd->lds_limit = 0;
  307. pdd->gpuvm_base = pdd->gpuvm_limit = 0;
  308. pdd->scratch_base = pdd->scratch_limit = 0;
  309. } else {
  310. /*
  311. * node id couldn't be 0 - the three MSB bits of
  312. * aperture shoudn't be 0
  313. */
  314. pdd->lds_base = MAKE_LDS_APP_BASE(id + 1);
  315. pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
  316. pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1);
  317. pdd->gpuvm_limit =
  318. MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base);
  319. pdd->scratch_base = MAKE_SCRATCH_APP_BASE(id + 1);
  320. pdd->scratch_limit =
  321. MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
  322. }
  323. dev_dbg(kfd_device, "node id %u\n", id);
  324. dev_dbg(kfd_device, "gpu id %u\n", pdd->dev->id);
  325. dev_dbg(kfd_device, "lds_base %llX\n", pdd->lds_base);
  326. dev_dbg(kfd_device, "lds_limit %llX\n", pdd->lds_limit);
  327. dev_dbg(kfd_device, "gpuvm_base %llX\n", pdd->gpuvm_base);
  328. dev_dbg(kfd_device, "gpuvm_limit %llX\n", pdd->gpuvm_limit);
  329. dev_dbg(kfd_device, "scratch_base %llX\n", pdd->scratch_base);
  330. dev_dbg(kfd_device, "scratch_limit %llX\n", pdd->scratch_limit);
  331. id++;
  332. }
  333. return 0;
  334. }