nouveau_sgdma.c

#include <linux/pagemap.h>
#include <linux/slab.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"

struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c work properly, otherwise have to move them here
	 */
	struct ttm_dma_tt ttm;
	struct nvkm_mem *node;
};
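
/* Common destroy hook shared by both backends: tears down the DMA-aware
 * ttm_tt and frees the backend allocated in nouveau_sgdma_create_ttm().
 */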
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}
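
/* On pre-Tesla chips (family < NV_DEVICE_INFO_V0_TESLA) bind maps the
 * backing pages into the GART VM immediately via nvkm_vm_map().
 */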
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nvkm_mem *node = mem->mm_node;

	if (ttm->sg) {
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
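	/* node->size counts 4 KiB units: convert the CPU page count to
	 * bytes (<< PAGE_SHIFT), then to 4 KiB units (>> 12).
	 */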
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;

	nvkm_vm_map(&node->vma[0], node);
	nvbe->node = node;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	nvkm_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.bind = nv04_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};
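
/* On Tesla (NV50) and newer, the GART mapping is handled from
 * move_notify() instead, so these hooks only record the page list.
 */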
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nvkm_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg) {
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind = nv50_sgdma_bind,
	.unbind = nv50_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};
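
/* ttm_tt constructor: allocate the backend, select the per-family
 * backend functions, and initialise the DMA-aware ttm_tt.
 */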
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
		/*
		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
		 * to free nvbe here.
		 */
		return NULL;
	}
	return &nvbe->ttm.ttm;
}
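
/*
 * Usage sketch (an assumption about the surrounding driver, not part of
 * this file): TTM invokes nouveau_sgdma_create_ttm() through the
 * ttm_tt_create hook of the driver's ttm_bo_driver. In nouveau_bo.c of
 * this kernel era the wiring looks roughly like:
 *
 *	struct ttm_bo_driver nouveau_bo_driver = {
 *		.ttm_tt_create = &nouveau_sgdma_create_ttm,
 *		...
 *	};
 */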