sched_fence.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"
struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity,
					       void *owner)
{
	struct amd_sched_fence *fence = NULL;
	unsigned seq;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	INIT_LIST_HEAD(&fence->scheduled_cb);
	fence->owner = owner;
	fence->sched = s_entity->sched;
	spin_lock_init(&fence->lock);

	seq = atomic_inc_return(&s_entity->fence_seq);
	fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
		   s_entity->fence_context, seq);

	return fence;
}
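
/**
 * amd_sched_fence_signal - signal completion of a scheduler fence
 * @fence: scheduler fence to signal
 *
 * Wraps fence_signal() and emits a FENCE_TRACE message noting whether the
 * fence was newly signaled or had already been signaled.
 */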
void amd_sched_fence_signal(struct amd_sched_fence *fence)
{
	int ret = fence_signal(&fence->base);
	if (!ret)
		FENCE_TRACE(&fence->base, "signaled from irq context\n");
	else
		FENCE_TRACE(&fence->base, "was already signaled\n");
}
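
/**
 * amd_sched_fence_scheduled - mark the fence's job as pushed to the hardware
 * @s_fence: scheduler fence whose job has been scheduled
 *
 * Sets AMD_SCHED_FENCE_SCHEDULED_BIT and invokes every callback queued on
 * the scheduled_cb list, removing each entry before calling it.
 */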
void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
{
	struct fence_cb *cur, *tmp;

	set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
	list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
		list_del_init(&cur->node);
		cur->func(&s_fence->base, cur);
	}
}
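
/*
 * struct fence_ops callbacks.  enable_signaling only reports that signaling
 * is already enabled: these fences are signaled explicitly by the scheduler
 * via amd_sched_fence_signal(), so nothing needs to be armed here.
 */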
static const char *amd_sched_fence_get_driver_name(struct fence *fence)
{
	return "amd_sched";
}

static const char *amd_sched_fence_get_timeline_name(struct fence *f)
{
	struct amd_sched_fence *fence = to_amd_sched_fence(f);
	return (const char *)fence->sched->name;
}

static bool amd_sched_fence_enable_signaling(struct fence *f)
{
	return true;
}
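
/*
 * Called when the last reference to the fence is dropped;
 * return the memory to the sched_fence_slab.
 */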
static void amd_sched_fence_release(struct fence *f)
{
	struct amd_sched_fence *fence = to_amd_sched_fence(f);
	kmem_cache_free(sched_fence_slab, fence);
}
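
/*
 * Fence operations shared by all scheduler fences.  .signaled is left NULL,
 * so fence_is_signaled() relies solely on the signaled bit, and waiting
 * falls back to fence_default_wait().
 */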
const struct fence_ops amd_sched_fence_ops = {
	.get_driver_name = amd_sched_fence_get_driver_name,
	.get_timeline_name = amd_sched_fence_get_timeline_name,
	.enable_signaling = amd_sched_fence_enable_signaling,
	.signaled = NULL,
	.wait = fence_default_wait,
	.release = amd_sched_fence_release,
};