/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"
static const struct fence_ops android_fence_ops;
static const struct file_operations sync_fence_fops;

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	obj->context = fence_context_alloc(1);
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->child_list_lock);

	sync_timeline_debug_add(obj);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
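
/*
 * Example (illustrative only): a driver typically embeds struct
 * sync_timeline at the start of its own timeline object and passes the
 * enlarged size here.  The struct and ops names below are hypothetical:
 *
 *	struct my_timeline {
 *		struct sync_timeline	obj;		(must come first)
 *		unsigned int		hw_counter;
 *	};
 *
 *	struct my_timeline *tl = (struct my_timeline *)
 *		sync_timeline_create(&my_timeline_ops,
 *				     sizeof(struct my_timeline), "my_tl");
 */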

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);

	sync_timeline_debug_remove(obj);

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}

static void sync_timeline_get(struct sync_timeline *obj)
{
	kref_get(&obj->kref);
}

static void sync_timeline_put(struct sync_timeline *obj)
{
	kref_put(&obj->kref, sync_timeline_free);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	/*
	 * Ensure timeline is marked as destroyed before
	 * changing timeline's fences status.
	 */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);
	sync_timeline_put(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct sync_pt *pt, *next;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->child_list_lock, flags);

	list_for_each_entry_safe(pt, next, &obj->active_list_head,
				 active_list) {
		if (fence_is_signaled_locked(&pt->base))
			list_del_init(&pt->active_list);
	}

	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
EXPORT_SYMBOL(sync_timeline_signal);

struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
{
	unsigned long flags;
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	sync_timeline_get(obj);
	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
		   obj->context, ++obj->value);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	INIT_LIST_HEAD(&pt->active_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	fence_put(&pt->base);
}
EXPORT_SYMBOL(sync_pt_free);
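
/*
 * Example (illustrative only): as with timelines, a driver embeds struct
 * sync_pt in a larger per-point structure and passes the enlarged size.
 * The names below are hypothetical and match the my_timeline sketch above:
 *
 *	struct my_pt {
 *		struct sync_pt	pt;		(must come first)
 *		unsigned int	value;		(counter value to wait for)
 *	};
 *
 *	struct my_pt *p = (struct my_pt *)
 *		sync_pt_create(&tl->obj, sizeof(struct my_pt));
 */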

static struct sync_fence *sync_fence_alloc(int size, const char *name)
{
	struct sync_fence *fence;

	fence = kzalloc(size, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	init_waitqueue_head(&fence->wq);

	return fence;

err:
	kfree(fence);
	return NULL;
}

static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
	struct sync_fence_cb *check;
	struct sync_fence *fence;

	check = container_of(cb, struct sync_fence_cb, cb);
	fence = check->fence;

	if (atomic_dec_and_test(&fence->status))
		wake_up_all(&fence->wq);
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
	if (fence == NULL)
		return NULL;

	fence->num_fences = 1;
	atomic_set(&fence->status, 1);

	fence->cbs[0].sync_pt = &pt->base;
	fence->cbs[0].fence = fence;
	if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
			       fence_check_cb_func))
		atomic_dec(&fence->status);

	sync_fence_debug_add(fence);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
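
/*
 * Example (illustrative only): the usual pattern for handing a fence to
 * userspace pairs sync_fence_create() with an unused fd, releasing the
 * fence on failure.  The fence name is hypothetical:
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	struct sync_fence *fence;
 *
 *	if (fd < 0)
 *		return fd;
 *	fence = sync_fence_create("my_fence", pt);
 *	if (fence == NULL) {
 *		put_unused_fd(fd);
 *		return -ENOMEM;
 *	}
 *	sync_fence_install(fence, fd);	(fd now owns the fence reference)
 */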

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static void sync_fence_add_pt(struct sync_fence *fence,
			      int *i, struct fence *pt)
{
	fence->cbs[*i].sync_pt = pt;
	fence->cbs[*i].fence = fence;

	if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
		fence_get(pt);
		(*i)++;
	}
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	int num_fences = a->num_fences + b->num_fences;
	struct sync_fence *fence;
	int i, i_a, i_b;
	unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);

	fence = sync_fence_alloc(size, name);
	if (fence == NULL)
		return NULL;

	atomic_set(&fence->status, num_fences);

	/*
	 * Assume sync_fence a and b are both ordered and have no
	 * duplicates with the same context.
	 *
	 * If a sync_fence can only be created with sync_fence_merge
	 * and sync_fence_create, this is a reasonable assumption.
	 */
	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
		struct fence *pt_a = a->cbs[i_a].sync_pt;
		struct fence *pt_b = b->cbs[i_b].sync_pt;

		if (pt_a->context < pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_a);

			i_a++;
		} else if (pt_a->context > pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_b);

			i_b++;
		} else {
			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
				sync_fence_add_pt(fence, &i, pt_a);
			else
				sync_fence_add_pt(fence, &i, pt_b);

			i_a++;
			i_b++;
		}
	}

	for (; i_a < a->num_fences; i_a++)
		sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);

	for (; i_b < b->num_fences; i_b++)
		sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);

	if (num_fences > i)
		atomic_sub(num_fences - i, &fence->status);
	fence->num_fences = i;

	sync_fence_debug_add(fence);
	return fence;
}
EXPORT_SYMBOL(sync_fence_merge);
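
/*
 * Note on the same-context case above: the unsigned subtraction
 * pt_a->seqno - pt_b->seqno <= INT_MAX is the usual wraparound-safe
 * "a is at or after b" test.  For example, with seqno_a = 5 and
 * seqno_b = 3 the difference is 2, so the later point pt_a is kept;
 * with seqno_a = 3 and seqno_b = 5 the difference wraps to a value
 * larger than INT_MAX and pt_b is kept instead.  On a given timeline
 * the later point implies the earlier one, so only one callback per
 * context is needed.
 */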

int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
			  int wake_flags, void *key)
{
	struct sync_fence_waiter *wait;

	wait = container_of(curr, struct sync_fence_waiter, work);
	list_del_init(&wait->work.task_list);

	wait->callback(wait->work.private, wait);
	return 1;
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	int err = atomic_read(&fence->status);
	unsigned long flags;

	if (err < 0)
		return err;

	if (!err)
		return 1;

	init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
	waiter->work.private = fence;

	spin_lock_irqsave(&fence->wq.lock, flags);
	err = atomic_read(&fence->status);
	if (err > 0)
		__add_wait_queue_tail(&fence->wq, &waiter->work);
	spin_unlock_irqrestore(&fence->wq.lock, flags);

	if (err < 0)
		return err;

	return !err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&fence->wq.lock, flags);
	if (!list_empty(&waiter->work.task_list))
		list_del_init(&waiter->work.task_list);
	else
		ret = -ENOENT;
	spin_unlock_irqrestore(&fence->wq.lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	long ret;
	int i;

	if (timeout < 0)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout);

	trace_sync_wait(fence, 1);
	for (i = 0; i < fence->num_fences; ++i)
		trace_sync_pt(fence->cbs[i].sync_pt);
	ret = wait_event_interruptible_timeout(fence->wq,
					       atomic_read(&fence->status) <= 0,
					       timeout);
	trace_sync_wait(fence, 0);

	if (ret < 0) {
		return ret;
	} else if (ret == 0) {
		if (timeout) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	ret = atomic_read(&fence->status);
	if (ret) {
		pr_info("fence error %ld on [%p]\n", ret, fence);
		sync_dump();
	}
	return ret;
}
EXPORT_SYMBOL(sync_fence_wait);
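
/*
 * Example (illustrative only): a kernel caller that must not block
 * forever passes a timeout in milliseconds; a negative timeout means
 * wait indefinitely:
 *
 *	err = sync_fence_wait(fence, 100);	(wait up to 100 ms)
 *	if (err == -ETIME)
 *		(the fence did not signal in time)
 */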

static const char *android_fence_get_driver_name(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	return parent->ops->driver_name;
}

static const char *android_fence_get_timeline_name(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	return parent->name;
}

static void android_fence_release(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	list_del(&pt->child_list);
	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
		list_del(&pt->active_list);
	spin_unlock_irqrestore(fence->lock, flags);

	if (parent->ops->free_pt)
		parent->ops->free_pt(pt);

	sync_timeline_put(parent);
	fence_free(&pt->base);
}

static bool android_fence_signaled(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	int ret;

	ret = parent->ops->has_signaled(pt);
	if (ret < 0)
		fence->status = ret;
	return ret;
}

static bool android_fence_enable_signaling(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (android_fence_signaled(fence))
		return false;

	list_add_tail(&pt->active_list, &parent->active_list_head);
	return true;
}

static int android_fence_fill_driver_data(struct fence *fence,
					  void *data, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->fill_driver_data)
		return 0;
	return parent->ops->fill_driver_data(pt, data, size);
}

static void android_fence_value_str(struct fence *fence,
				    char *str, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->pt_value_str) {
		if (size)
			*str = 0;
		return;
	}
	parent->ops->pt_value_str(pt, str, size);
}

static void android_fence_timeline_value_str(struct fence *fence,
					     char *str, int size)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (!parent->ops->timeline_value_str) {
		if (size)
			*str = 0;
		return;
	}
	parent->ops->timeline_value_str(parent, str, size);
}

static const struct fence_ops android_fence_ops = {
	.get_driver_name = android_fence_get_driver_name,
	.get_timeline_name = android_fence_get_timeline_name,
	.enable_signaling = android_fence_enable_signaling,
	.signaled = android_fence_signaled,
	.wait = fence_default_wait,
	.release = android_fence_release,
	.fill_driver_data = android_fence_fill_driver_data,
	.fence_value_str = android_fence_value_str,
	.timeline_value_str = android_fence_timeline_value_str,
};

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
	int i;

	for (i = 0; i < fence->num_fences; ++i) {
		fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb);
		fence_put(fence->cbs[i].sync_pt);
	}

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;

	sync_fence_debug_remove(fence);

	kref_put(&fence->kref, sync_fence_free);
	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;
	int status;

	poll_wait(file, &fence->wq, wait);

	status = atomic_read(&fence->status);

	if (!status)
		return POLLIN;
	else if (status < 0)
		return POLLERR;
	return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct fence *fence, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (fence->ops->fill_driver_data) {
		ret = fence->ops->fill_driver_data(fence, info->driver_data,
						   size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
		sizeof(info->obj_name));
	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
		sizeof(info->driver_name));
	if (fence_is_signaled(fence))
		info->status = fence->status >= 0 ? 1 : fence->status;
	else
		info->status = 0;
	info->timestamp_ns = ktime_to_ns(fence->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	__u32 size;
	__u32 len = 0;
	int ret, i;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = atomic_read(&fence->status);
	if (data->status >= 0)
		data->status = !data->status;

	len = sizeof(struct sync_fence_info_data);

	for (i = 0; i < fence->num_fences; ++i) {
		struct fence *pt = fence->cbs[i].sync_pt;

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};
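
/*
 * Example (illustrative only): from userspace, the fd exposed by a
 * driver is waited on with SYNC_IOC_WAIT, whose argument is the __s32
 * millisecond timeout that sync_fence_ioctl_wait() copies in above:
 *
 *	__s32 timeout_ms = 1000;
 *	int err = ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms);
 *	if (err < 0 && errno == ETIME)
 *		(timed out)
 *
 * Polling the fd also works, via sync_fence_poll() above: POLLIN once
 * the fence has signaled, POLLERR if it carries an error status.
 */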