/*
 * Asterisk -- An open source telephony toolkit.
 *
 * Copyright (C) 1999 - 2010, Digium, Inc.
 *
 * Mark Spencer <markster@digium.com>
 * Russell Bryant <russell@digium.com>
 *
 * See http://www.asterisk.org for more information about
 * the Asterisk project. Please do not directly contact
 * any of the maintainers of this project for assistance;
 * the project provides a web site, mailing lists and IRC
 * channels for your use.
 *
 * This program is free software, distributed under the terms of
 * the GNU General Public License Version 2. See the LICENSE file
 * at the top of the source tree.
 */

/*! \file
 *
 * \brief Scheduler Routines (from cheops-NG)
 *
 * \author Mark Spencer <markster@digium.com>
 */

/*** MODULEINFO
	<support_level>core</support_level>
 ***/

#include "asterisk.h"

ASTERISK_FILE_VERSION(__FILE__, "$Revision$")

#ifdef DEBUG_SCHEDULER
#define DEBUG(a) a
#else
#define DEBUG(a)
#endif

#include <sys/time.h>

#include "asterisk/sched.h"
#include "asterisk/channel.h"
#include "asterisk/lock.h"
#include "asterisk/utils.h"
#include "asterisk/heap.h"
#include "asterisk/threadstorage.h"

/*!
 * \brief Max num of schedule structs
 *
 * \note The max number of schedule structs to keep around
 * for use. Undefine to disable schedule structure
 * caching. (Only disable this on very low memory
 * machines)
 */
#define SCHED_MAX_CACHE 128

AST_THREADSTORAGE(last_del_id);

/*!
 * \brief Scheduler ID holder
 *
 * These form a queue on a scheduler context. When a new
 * scheduled item is created, a sched_id is popped off the
 * queue and its id is assigned to the new scheduled item.
 * When the scheduled task is complete, the sched_id on that
 * task is then pushed to the back of the queue to be re-used
 * on some future scheduled item.
 */
struct sched_id {
	/*! Immutable ID number that is copied onto the scheduled task */
	int id;
	AST_LIST_ENTRY(sched_id) list;
};

struct sched {
	AST_LIST_ENTRY(sched) list;
	/*! The ID that has been popped off the scheduler context's queue */
	struct sched_id *sched_id;
	struct timeval when;	/*!< Absolute time event should take place */
	/*!
	 * \brief Tie breaker in case the when is the same for multiple entries.
	 *
	 * \note The oldest expiring entry in the scheduler heap goes first.
	 * This is possible when multiple events are scheduled to expire at
	 * the same time by internal coding.
	 */
	unsigned int tie_breaker;
	int resched;		/*!< When to reschedule */
	int variable;		/*!< Use return value from callback to reschedule */
	const void *data;	/*!< Data */
	ast_sched_cb callback;	/*!< Callback */
	ssize_t __heap_index;
	/*!
	 * Used to synchronize between thread running a task and thread
	 * attempting to delete a task
	 */
	ast_cond_t cond;
	/*! Indication that a running task was deleted. */
	unsigned int deleted:1;
};

struct sched_thread {
	pthread_t thread;
	ast_cond_t cond;
	unsigned int stop:1;
};

struct ast_sched_context {
	ast_mutex_t lock;
	unsigned int eventcnt;	/*!< Number of events processed */
	unsigned int highwater;	/*!< highest count so far */
	/*! Next tie breaker in case events expire at the same time. */
	unsigned int tie_breaker;
	struct ast_heap *sched_heap;
	struct sched_thread *sched_thread;
	/*! The scheduled task that is currently executing */
	struct sched *currently_executing;

#ifdef SCHED_MAX_CACHE
	AST_LIST_HEAD_NOLOCK(, sched) schedc;	/*!< Cache of unused schedule structures and how many */
	unsigned int schedccnt;
#endif

	/*! Queue of scheduler task IDs to assign */
	AST_LIST_HEAD_NOLOCK(, sched_id) id_queue;
	/*! The number of IDs in the id_queue */
	int id_queue_size;
};

static void *sched_run(void *data)
{
	struct ast_sched_context *con = data;

	while (!con->sched_thread->stop) {
		int ms;
		struct timespec ts = {
			.tv_sec = 0,
		};

		ast_mutex_lock(&con->lock);

		if (con->sched_thread->stop) {
			ast_mutex_unlock(&con->lock);
			return NULL;
		}

		ms = ast_sched_wait(con);
		if (ms == -1) {
			ast_cond_wait(&con->sched_thread->cond, &con->lock);
		} else {
			struct timeval tv;
			tv = ast_tvadd(ast_tvnow(), ast_samp2tv(ms, 1000));
			ts.tv_sec = tv.tv_sec;
			ts.tv_nsec = tv.tv_usec * 1000;
			ast_cond_timedwait(&con->sched_thread->cond, &con->lock, &ts);
		}

		ast_mutex_unlock(&con->lock);

		if (con->sched_thread->stop) {
			return NULL;
		}

		ast_sched_runq(con);
	}

	return NULL;
}

static void sched_thread_destroy(struct ast_sched_context *con)
{
	if (!con->sched_thread) {
		return;
	}

	if (con->sched_thread->thread != AST_PTHREADT_NULL) {
		ast_mutex_lock(&con->lock);
		con->sched_thread->stop = 1;
		ast_cond_signal(&con->sched_thread->cond);
		ast_mutex_unlock(&con->lock);
		pthread_join(con->sched_thread->thread, NULL);
		con->sched_thread->thread = AST_PTHREADT_NULL;
	}

	ast_cond_destroy(&con->sched_thread->cond);
	ast_free(con->sched_thread);
	con->sched_thread = NULL;
}

int ast_sched_start_thread(struct ast_sched_context *con)
{
	struct sched_thread *st;

	if (con->sched_thread) {
		ast_log(LOG_ERROR, "Thread already started on this scheduler context\n");
		return -1;
	}

	if (!(st = ast_calloc(1, sizeof(*st)))) {
		return -1;
	}

	ast_cond_init(&st->cond, NULL);
	st->thread = AST_PTHREADT_NULL;
	con->sched_thread = st;

	if (ast_pthread_create_background(&st->thread, NULL, sched_run, con)) {
		ast_log(LOG_ERROR, "Failed to create scheduler thread\n");
		sched_thread_destroy(con);
		return -1;
	}

	return 0;
}

static int sched_time_cmp(void *va, void *vb)
{
	struct sched *a = va;
	struct sched *b = vb;
	int cmp;

	cmp = ast_tvcmp(b->when, a->when);
	if (!cmp) {
		cmp = b->tie_breaker - a->tie_breaker;
	}
	return cmp;
}

struct ast_sched_context *ast_sched_context_create(void)
{
	struct ast_sched_context *tmp;

	if (!(tmp = ast_calloc(1, sizeof(*tmp)))) {
		return NULL;
	}

	ast_mutex_init(&tmp->lock);
	tmp->eventcnt = 1;

	AST_LIST_HEAD_INIT_NOLOCK(&tmp->id_queue);

	if (!(tmp->sched_heap = ast_heap_create(8, sched_time_cmp,
			offsetof(struct sched, __heap_index)))) {
		ast_sched_context_destroy(tmp);
		return NULL;
	}

	return tmp;
}
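
/*!
 * \note Usage sketch (illustrative; not part of the original source).
 * A context is typically created once, given a background servicing
 * thread, and destroyed at shutdown:
 *
 * \code
 * struct ast_sched_context *con = ast_sched_context_create();
 *
 * if (!con) {
 *     return -1;
 * }
 * if (ast_sched_start_thread(con)) {
 *     ast_sched_context_destroy(con);
 *     return -1;
 * }
 * \endcode
 *
 * Contexts that are not given a thread must be serviced by hand with
 * ast_sched_wait() and ast_sched_runq() (see those functions below).
 */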
static void sched_free(struct sched *task)
{
	/* task->sched_id will be NULL most of the time, but when the
	 * scheduler context shuts down, it will free all scheduled
	 * tasks, and in that case, the task->sched_id will be non-NULL
	 */
	ast_free(task->sched_id);
	ast_cond_destroy(&task->cond);
	ast_free(task);
}

void ast_sched_context_destroy(struct ast_sched_context *con)
{
	struct sched *s;
	struct sched_id *sid;

	sched_thread_destroy(con);
	con->sched_thread = NULL;

	ast_mutex_lock(&con->lock);

#ifdef SCHED_MAX_CACHE
	while ((s = AST_LIST_REMOVE_HEAD(&con->schedc, list))) {
		sched_free(s);
	}
#endif

	if (con->sched_heap) {
		while ((s = ast_heap_pop(con->sched_heap))) {
			sched_free(s);
		}
		ast_heap_destroy(con->sched_heap);
		con->sched_heap = NULL;
	}

	while ((sid = AST_LIST_REMOVE_HEAD(&con->id_queue, list))) {
		ast_free(sid);
	}

	ast_mutex_unlock(&con->lock);
	ast_mutex_destroy(&con->lock);

	ast_free(con);
}

#define ID_QUEUE_INCREMENT 16
/*!
 * \brief Add new scheduler IDs to the queue.
 *
 * \return The number of IDs added to the queue
 */
static int add_ids(struct ast_sched_context *con)
{
	int new_size;
	int original_size;
	int i;

	original_size = con->id_queue_size;
	/* So we don't go overboard with the mallocs here, we'll just up
	 * the size of the list by a fixed amount each time instead of
	 * multiplying the size by any particular factor
	 */
	new_size = original_size + ID_QUEUE_INCREMENT;
	if (new_size < 0) {
		/* Overflow. Cap it at INT_MAX. */
		new_size = INT_MAX;
	}
	for (i = original_size; i < new_size; ++i) {
		struct sched_id *new_id;

		new_id = ast_calloc(1, sizeof(*new_id));
		if (!new_id) {
			break;
		}

		/*
		 * According to the API doxygen a sched ID of 0 is valid.
		 * Unfortunately, 0 was never returned historically and
		 * several users incorrectly coded usage of the returned
		 * sched ID assuming that 0 was invalid.
		 */
		new_id->id = ++con->id_queue_size;

		AST_LIST_INSERT_TAIL(&con->id_queue, new_id, list);
	}

	return con->id_queue_size - original_size;
}

static int set_sched_id(struct ast_sched_context *con, struct sched *new_sched)
{
	if (AST_LIST_EMPTY(&con->id_queue) && (add_ids(con) == 0)) {
		return -1;
	}

	new_sched->sched_id = AST_LIST_REMOVE_HEAD(&con->id_queue, list);
	return 0;
}

static void sched_release(struct ast_sched_context *con, struct sched *tmp)
{
	if (tmp->sched_id) {
		AST_LIST_INSERT_TAIL(&con->id_queue, tmp->sched_id, list);
		tmp->sched_id = NULL;
	}

	/*
	 * Add to the cache, or just free() if we
	 * already have too many cache entries
	 */
#ifdef SCHED_MAX_CACHE
	if (con->schedccnt < SCHED_MAX_CACHE) {
		AST_LIST_INSERT_HEAD(&con->schedc, tmp, list);
		con->schedccnt++;
	} else
#endif
		sched_free(tmp);
}

static struct sched *sched_alloc(struct ast_sched_context *con)
{
	struct sched *tmp;

	/*
	 * We keep a small cache of schedule entries
	 * to minimize the number of necessary malloc()'s
	 */
#ifdef SCHED_MAX_CACHE
	if ((tmp = AST_LIST_REMOVE_HEAD(&con->schedc, list))) {
		con->schedccnt--;
	} else
#endif
	{
		tmp = ast_calloc(1, sizeof(*tmp));
		if (!tmp) {
			return NULL;
		}
		ast_cond_init(&tmp->cond, NULL);
	}

	if (set_sched_id(con, tmp)) {
		sched_release(con, tmp);
		return NULL;
	}

	return tmp;
}

void ast_sched_clean_by_callback(struct ast_sched_context *con, ast_sched_cb match, ast_sched_cb cleanup_cb)
{
	int i = 1;
	struct sched *current;

	ast_mutex_lock(&con->lock);
	while ((current = ast_heap_peek(con->sched_heap, i))) {
		if (current->callback != match) {
			i++;
			continue;
		}

		ast_heap_remove(con->sched_heap, current);

		cleanup_cb(current->data);

		sched_release(con, current);
	}
	ast_mutex_unlock(&con->lock);
}

/*! \brief
 * Return the number of milliseconds
 * until the next scheduled event
 */
int ast_sched_wait(struct ast_sched_context *con)
{
	int ms;
	struct sched *s;

	DEBUG(ast_debug(1, "ast_sched_wait()\n"));

	ast_mutex_lock(&con->lock);
	if ((s = ast_heap_peek(con->sched_heap, 1))) {
		ms = ast_tvdiff_ms(s->when, ast_tvnow());
		if (ms < 0) {
			ms = 0;
		}
	} else {
		ms = -1;
	}
	ast_mutex_unlock(&con->lock);

	return ms;
}
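
/*!
 * \note Polling sketch (illustrative; not part of the original source).
 * A caller servicing a context without ast_sched_start_thread()
 * combines ast_sched_wait() with ast_sched_runq(); the fds/nfds
 * descriptor set here is a hypothetical stand-in for whatever the
 * surrounding code is already polling:
 *
 * \code
 * for (;;) {
 *     int ms = ast_sched_wait(con);
 *
 *     if (poll(fds, nfds, ms) < 0) {
 *         break;
 *     }
 *     ast_sched_runq(con);
 * }
 * \endcode
 *
 * A return of -1 means nothing is scheduled, which lines up with
 * poll()'s convention of blocking indefinitely on a -1 timeout.
 */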
/*! \brief
 * Take a sched structure and put it in the
 * queue, such that the soonest event is
 * first in the list.
 */
static void schedule(struct ast_sched_context *con, struct sched *s)
{
	size_t size;

	size = ast_heap_size(con->sched_heap);

	/* Record the largest the scheduler heap became for reporting purposes. */
	if (con->highwater <= size) {
		con->highwater = size + 1;
	}

	/* Determine the tie breaker value for the new entry. */
	if (size) {
		++con->tie_breaker;
	} else {
		/*
		 * Restart the sequence for the first entry to make integer
		 * roll over more unlikely.
		 */
		con->tie_breaker = 0;
	}
	s->tie_breaker = con->tie_breaker;

	ast_heap_push(con->sched_heap, s);
}
/*! \brief
 * Given the last event time *t and the offset in milliseconds 'when',
 * compute the next value.
 */
static void sched_settime(struct timeval *t, int when)
{
	struct timeval now = ast_tvnow();

	if (when < 0) {
		/*
		 * A negative when value is likely a bug as it
		 * represents a VERY large timeout time.
		 */
		ast_log(LOG_WARNING,
			"Bug likely: Negative time interval %d (interpreted as %u ms) requested!\n",
			when, (unsigned int) when);
		ast_assert(0);
	}

	/*ast_debug(1, "TV -> %lu,%lu\n", tv->tv_sec, tv->tv_usec);*/
	if (ast_tvzero(*t))	/* not supplied, default to now */
		*t = now;
	*t = ast_tvadd(*t, ast_samp2tv(when, 1000));
	if (ast_tvcmp(*t, now) < 0) {
		*t = now;
	}
}
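
/*!
 * \note Worked example (illustrative; not part of the original source).
 * Because the interval is added to the previous fire time rather than
 * to "now", periodic tasks do not drift when a callback runs late:
 * with a 1000 ms interval and a task due at t=5000 ms that actually
 * runs at t=5003 ms, the next fire time is 5000 + 1000 = 6000 ms, not
 * 5003 + 1000 = 6003 ms. The clamp at the end only engages when the
 * computed time is already in the past, rescheduling for "now".
 */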
int ast_sched_replace_variable(int old_id, struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data, int variable)
{
	/* 0 means the schedule item is new; do not delete */
	if (old_id > 0) {
		AST_SCHED_DEL(con, old_id);
	}
	return ast_sched_add_variable(con, when, callback, data, variable);
}

/*! \brief
 * Schedule callback(data) to happen when ms into the future
 */
int ast_sched_add_variable(struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data, int variable)
{
	struct sched *tmp;
	int res = -1;

	DEBUG(ast_debug(1, "ast_sched_add()\n"));

	ast_mutex_lock(&con->lock);
	if ((tmp = sched_alloc(con))) {
		con->eventcnt++;
		tmp->callback = callback;
		tmp->data = data;
		tmp->resched = when;
		tmp->variable = variable;
		tmp->when = ast_tv(0, 0);
		tmp->deleted = 0;

		sched_settime(&tmp->when, when);
		schedule(con, tmp);
		res = tmp->sched_id->id;
	}
#ifdef DUMP_SCHEDULER
	/* Dump contents of the context while we have the lock so nothing gets screwed up by accident. */
	ast_sched_dump(con);
#endif
	if (con->sched_thread) {
		ast_cond_signal(&con->sched_thread->cond);
	}
	ast_mutex_unlock(&con->lock);

	return res;
}
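
/*!
 * \note Callback sketch (illustrative; not part of the original source).
 * A callback that returns 0 runs once; a non-zero return reschedules
 * the task. With variable set, the return value itself becomes the
 * next interval in milliseconds; with variable clear, the original
 * interval is reused. The peer structure and helper below are
 * hypothetical:
 *
 * \code
 * static int poke_peer(const void *data)
 * {
 *     struct my_peer *peer = (struct my_peer *) data;
 *
 *     my_send_poke(peer);
 *
 *     // Back off: wait twice as long before the next poke.
 *     return peer->poke_interval_ms * 2;
 * }
 *
 * id = ast_sched_add_variable(con, 1000, poke_peer, peer, 1);
 * \endcode
 */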
int ast_sched_replace(int old_id, struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data)
{
	if (old_id > -1) {
		AST_SCHED_DEL(con, old_id);
	}
	return ast_sched_add(con, when, callback, data);
}

int ast_sched_add(struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data)
{
	return ast_sched_add_variable(con, when, callback, data, 0);
}

static struct sched *sched_find(struct ast_sched_context *con, int id)
{
	int x;
	size_t heap_size;

	heap_size = ast_heap_size(con->sched_heap);
	for (x = 1; x <= heap_size; x++) {
		struct sched *cur = ast_heap_peek(con->sched_heap, x);

		if (cur->sched_id->id == id) {
			return cur;
		}
	}

	return NULL;
}

const void *ast_sched_find_data(struct ast_sched_context *con, int id)
{
	struct sched *s;
	const void *data = NULL;

	ast_mutex_lock(&con->lock);

	s = sched_find(con, id);
	if (s) {
		data = s->data;
	}

	ast_mutex_unlock(&con->lock);

	return data;
}

/*! \brief
 * Delete the schedule entry with number
 * "id". It's nearly impossible that there
 * would be two or more in the list with that
 * id.
 */
int ast_sched_del(struct ast_sched_context *con, int id)
{
	struct sched *s = NULL;
	int *last_id = ast_threadstorage_get(&last_del_id, sizeof(int));

	DEBUG(ast_debug(1, "ast_sched_del(%d)\n", id));

	if (id < 0) {
		return 0;
	}

	ast_mutex_lock(&con->lock);

	s = sched_find(con, id);
	if (s) {
		if (!ast_heap_remove(con->sched_heap, s)) {
			ast_log(LOG_WARNING, "sched entry %d not in the sched heap?\n", s->sched_id->id);
		}
		sched_release(con, s);
	} else if (con->currently_executing && (id == con->currently_executing->sched_id->id)) {
		s = con->currently_executing;
		s->deleted = 1;
		/* Wait for executing task to complete so that caller of ast_sched_del() does not
		 * free memory out from under the task.
		 */
		while (con->currently_executing && (id == con->currently_executing->sched_id->id)) {
			ast_cond_wait(&s->cond, &con->lock);
		}
		/* Do not sched_release() here because ast_sched_runq() will do it */
	}

#ifdef DUMP_SCHEDULER
	/* Dump contents of the context while we have the lock so nothing gets screwed up by accident. */
	ast_sched_dump(con);
#endif
	if (con->sched_thread) {
		ast_cond_signal(&con->sched_thread->cond);
	}
	ast_mutex_unlock(&con->lock);
	if (!s && *last_id != id) {
		ast_debug(1, "Attempted to delete nonexistent schedule entry %d!\n", id);
		/* Removing a nonexistent schedule entry shouldn't trigger an assert
		 * (it was enabled in DEV_MODE) because in many places entries are
		 * deleted without having a valid id. */
		*last_id = id;
		return -1;
	} else if (!s) {
		return -1;
	}

	return 0;
}
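
/*!
 * \note Deletion sketch (illustrative; not part of the original source).
 * Callers normally delete through the AST_SCHED_DEL() macro (as
 * ast_sched_replace() does above), which also resets the stored id so
 * a stale id is not deleted twice. The peer field here is hypothetical:
 *
 * \code
 * AST_SCHED_DEL(con, peer->poke_sched_id);
 * \endcode
 *
 * If the target task is mid-callback in another thread, the delete
 * blocks on the task's condition variable until the callback returns.
 * A callback that wants to cancel itself should simply return 0 rather
 * than delete its own id (see the note in ast_sched_runq() below).
 */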
void ast_sched_report(struct ast_sched_context *con, struct ast_str **buf, struct ast_cb_names *cbnames)
{
	int i, x;
	struct sched *cur;
	int countlist[cbnames->numassocs + 1];
	size_t heap_size;

	memset(countlist, 0, sizeof(countlist));
	ast_str_set(buf, 0, " Highwater = %u\n schedcnt = %zu\n", con->highwater, ast_heap_size(con->sched_heap));

	ast_mutex_lock(&con->lock);

	heap_size = ast_heap_size(con->sched_heap);
	for (x = 1; x <= heap_size; x++) {
		cur = ast_heap_peek(con->sched_heap, x);
		/* match the callback to the cblist */
		for (i = 0; i < cbnames->numassocs; i++) {
			if (cur->callback == cbnames->cblist[i]) {
				break;
			}
		}

		if (i < cbnames->numassocs) {
			countlist[i]++;
		} else {
			countlist[cbnames->numassocs]++;
		}
	}

	ast_mutex_unlock(&con->lock);

	for (i = 0; i < cbnames->numassocs; i++) {
		ast_str_append(buf, 0, " %s : %d\n", cbnames->list[i], countlist[i]);
	}

	ast_str_append(buf, 0, " <unknown> : %d\n", countlist[cbnames->numassocs]);
}

/*! \brief Dump the contents of the scheduler to LOG_DEBUG */
void ast_sched_dump(struct ast_sched_context *con)
{
	struct sched *q;
	struct timeval when;
	int x;
	size_t heap_size;

	if (!DEBUG_ATLEAST(1)) {
		return;
	}

	when = ast_tvnow();
#ifdef SCHED_MAX_CACHE
	ast_log(LOG_DEBUG, "Asterisk Schedule Dump (%zu in Q, %u Total, %u Cache, %u high-water)\n",
		ast_heap_size(con->sched_heap), con->eventcnt - 1, con->schedccnt, con->highwater);
#else
	ast_log(LOG_DEBUG, "Asterisk Schedule Dump (%zu in Q, %u Total, %u high-water)\n",
		ast_heap_size(con->sched_heap), con->eventcnt - 1, con->highwater);
#endif

	ast_log(LOG_DEBUG, "=============================================================\n");
	ast_log(LOG_DEBUG, "|ID    Callback          Data              Time  (sec:ms)   |\n");
	ast_log(LOG_DEBUG, "+-----+-----------------+-----------------+-----------------+\n");
	ast_mutex_lock(&con->lock);
	heap_size = ast_heap_size(con->sched_heap);
	for (x = 1; x <= heap_size; x++) {
		struct timeval delta;

		q = ast_heap_peek(con->sched_heap, x);
		delta = ast_tvsub(q->when, when);
		ast_log(LOG_DEBUG, "|%.4d | %-15p | %-15p | %.6ld : %.6ld |\n",
			q->sched_id->id,
			q->callback,
			q->data,
			(long) delta.tv_sec,
			(long int) delta.tv_usec);
	}
	ast_mutex_unlock(&con->lock);
	ast_log(LOG_DEBUG, "=============================================================\n");
}

/*! \brief
 * Launch all events which need to be run at this time.
 */
int ast_sched_runq(struct ast_sched_context *con)
{
	struct sched *current;
	struct timeval when;
	int numevents;
	int res;

	DEBUG(ast_debug(1, "ast_sched_runq()\n"));

	ast_mutex_lock(&con->lock);

	when = ast_tvadd(ast_tvnow(), ast_tv(0, 1000));
	for (numevents = 0; (current = ast_heap_peek(con->sched_heap, 1)); numevents++) {
		/* schedule all events which are going to expire within 1ms.
		 * We only care about millisecond accuracy anyway, so this will
		 * help us get more than one event at one time if they are very
		 * close together.
		 */
		if (ast_tvcmp(current->when, when) != -1) {
			break;
		}

		current = ast_heap_pop(con->sched_heap);

		/*
		 * At this point, the schedule queue is still intact. We
		 * have removed the first event and the rest is still there,
		 * so it's permissible for the callback to add new events, but
		 * trying to delete itself won't work because it isn't in
		 * the schedule queue. If that's what it wants to do, it
		 * should return 0.
		 */
		con->currently_executing = current;
		ast_mutex_unlock(&con->lock);
		res = current->callback(current->data);
		ast_mutex_lock(&con->lock);
		con->currently_executing = NULL;
		ast_cond_signal(&current->cond);

		if (res && !current->deleted) {
			/*
			 * If they return non-zero, we should schedule them to be
			 * run again.
			 */
			sched_settime(&current->when, current->variable ? res : current->resched);
			schedule(con, current);
		} else {
			/* No longer needed, so release it */
			sched_release(con, current);
		}
	}

	ast_mutex_unlock(&con->lock);

	return numevents;
}
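
/*!
 * \note Worked example (illustrative; not part of the original source).
 * With the 1 ms look-ahead above, a single ast_sched_runq() pass at
 * t=1000.0 ms fires tasks due at t=1000.4 ms and t=1000.9 ms together,
 * but leaves a task due at t=1001.2 ms for the next pass; the return
 * value in that case is 2.
 */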
long ast_sched_when(struct ast_sched_context *con, int id)
{
	struct sched *s;
	long secs = -1;

	DEBUG(ast_debug(1, "ast_sched_when()\n"));

	ast_mutex_lock(&con->lock);
	s = sched_find(con, id);
	if (s) {
		struct timeval now = ast_tvnow();
		secs = s->when.tv_sec - now.tv_sec;
	}
	ast_mutex_unlock(&con->lock);

	return secs;
}