/*
 * sched.c -- from asterisk/asterisk.git, main/sched.c
 * (change: "sched.c: Add warning about negative time interval request.")
 */
1 /*
2  * Asterisk -- An open source telephony toolkit.
3  *
4  * Copyright (C) 1999 - 2010, Digium, Inc.
5  *
6  * Mark Spencer <markster@digium.com>
7  * Russell Bryant <russell@digium.com>
8  *
9  * See http://www.asterisk.org for more information about
10  * the Asterisk project. Please do not directly contact
11  * any of the maintainers of this project for assistance;
12  * the project provides a web site, mailing lists and IRC
13  * channels for your use.
14  *
15  * This program is free software, distributed under the terms of
16  * the GNU General Public License Version 2. See the LICENSE file
17  * at the top of the source tree.
18  */
19
20 /*! \file
21  *
22  * \brief Scheduler Routines (from cheops-NG)
23  *
24  * \author Mark Spencer <markster@digium.com>
25  */
26
27 /*** MODULEINFO
28         <support_level>core</support_level>
29  ***/
30
31 #include "asterisk.h"
32
33 ASTERISK_REGISTER_FILE()
34
35 #ifdef DEBUG_SCHEDULER
36 #define DEBUG(a) do { \
37         if (option_debug) \
38                 DEBUG_M(a) \
39         } while (0)
40 #else
41 #define DEBUG(a)
42 #endif
43
44 #include <sys/time.h>
45
46 #include "asterisk/sched.h"
47 #include "asterisk/channel.h"
48 #include "asterisk/lock.h"
49 #include "asterisk/utils.h"
50 #include "asterisk/heap.h"
51 #include "asterisk/threadstorage.h"
52
53 /*!
54  * \brief Max num of schedule structs
55  *
56  * \note The max number of schedule structs to keep around
57  * for use.  Undefine to disable schedule structure
58  * caching. (Only disable this on very low memory
59  * machines)
60  */
61 #define SCHED_MAX_CACHE 128
62
63 AST_THREADSTORAGE(last_del_id);
64
65 /*!
66  * \brief Scheduler ID holder
67  *
68  * These form a queue on a scheduler context. When a new
69  * scheduled item is created, a sched_id is popped off the
70  * queue and its id is assigned to the new scheduled item.
71  * When the scheduled task is complete, the sched_id on that
72  * task is then pushed to the back of the queue to be re-used
73  * on some future scheduled item.
74  */
75 struct sched_id {
76         /*! Immutable ID number that is copied onto the scheduled task */
77         int id;
78         AST_LIST_ENTRY(sched_id) list;
79 };
80
81 struct sched {
82         AST_LIST_ENTRY(sched) list;
83         /*! The ID that has been popped off the scheduler context's queue */
84         struct sched_id *sched_id;
85         struct timeval when;          /*!< Absolute time event should take place */
86         int resched;                  /*!< When to reschedule */
87         int variable;                 /*!< Use return value from callback to reschedule */
88         const void *data;             /*!< Data */
89         ast_sched_cb callback;        /*!< Callback */
90         ssize_t __heap_index;
91         /*!
92          * Used to synchronize between thread running a task and thread
93          * attempting to delete a task
94          */
95         ast_cond_t cond;
96         /*! Indication that a running task was deleted. */
97         unsigned int deleted:1;
98 };
99
100 struct sched_thread {
101         pthread_t thread;
102         ast_cond_t cond;
103         unsigned int stop:1;
104 };
105
106 struct ast_sched_context {
107         ast_mutex_t lock;
108         unsigned int eventcnt;                  /*!< Number of events processed */
109         unsigned int highwater;                                 /*!< highest count so far */
110         struct ast_heap *sched_heap;
111         struct sched_thread *sched_thread;
112         /*! The scheduled task that is currently executing */
113         struct sched *currently_executing;
114
115 #ifdef SCHED_MAX_CACHE
116         AST_LIST_HEAD_NOLOCK(, sched) schedc;   /*!< Cache of unused schedule structures and how many */
117         unsigned int schedccnt;
118 #endif
119         /*! Queue of scheduler task IDs to assign */
120         AST_LIST_HEAD_NOLOCK(, sched_id) id_queue;
121         /*! The number of IDs in the id_queue */
122         int id_queue_size;
123 };
124
/*!
 * \brief Main loop for a scheduler context's dedicated service thread.
 *
 * Repeatedly waits until the next scheduled event is due -- or until
 * signalled via sched_thread->cond that the queue changed or the thread
 * should stop -- and then runs all due events via ast_sched_runq().
 *
 * \param data The struct ast_sched_context to service.
 * \return Always NULL.
 */
static void *sched_run(void *data)
{
	struct ast_sched_context *con = data;

	while (!con->sched_thread->stop) {
		int ms;
		struct timespec ts = {
			.tv_sec = 0,
		};

		ast_mutex_lock(&con->lock);

		/* Re-check the stop flag under the lock in case shutdown was
		 * requested between the loop test and acquiring the lock. */
		if (con->sched_thread->stop) {
			ast_mutex_unlock(&con->lock);
			return NULL;
		}

		ms = ast_sched_wait(con);

		if (ms == -1) {
			/* Nothing is scheduled: sleep until signalled. */
			ast_cond_wait(&con->sched_thread->cond, &con->lock);
		} else {
			/* Sleep until the next event's absolute deadline (or until
			 * signalled that the queue changed). */
			struct timeval tv;
			tv = ast_tvadd(ast_tvnow(), ast_samp2tv(ms, 1000));
			ts.tv_sec = tv.tv_sec;
			ts.tv_nsec = tv.tv_usec * 1000;
			ast_cond_timedwait(&con->sched_thread->cond, &con->lock, &ts);
		}

		ast_mutex_unlock(&con->lock);

		if (con->sched_thread->stop) {
			return NULL;
		}

		ast_sched_runq(con);
	}

	return NULL;
}
165
/*!
 * \brief Stop and clean up a context's scheduler service thread, if any.
 *
 * Sets the stop flag, wakes the thread, joins it, then destroys the
 * condition variable and frees the sched_thread structure.  Safe to
 * call when no thread was ever started.
 */
static void sched_thread_destroy(struct ast_sched_context *con)
{
	if (!con->sched_thread) {
		return;
	}

	if (con->sched_thread->thread != AST_PTHREADT_NULL) {
		/* Flag the thread to stop under the lock, wake it so it notices,
		 * then wait for it to exit before freeing its state. */
		ast_mutex_lock(&con->lock);
		con->sched_thread->stop = 1;
		ast_cond_signal(&con->sched_thread->cond);
		ast_mutex_unlock(&con->lock);
		pthread_join(con->sched_thread->thread, NULL);
		con->sched_thread->thread = AST_PTHREADT_NULL;
	}

	ast_cond_destroy(&con->sched_thread->cond);

	ast_free(con->sched_thread);

	con->sched_thread = NULL;
}
187
188 int ast_sched_start_thread(struct ast_sched_context *con)
189 {
190         struct sched_thread *st;
191
192         if (con->sched_thread) {
193                 ast_log(LOG_ERROR, "Thread already started on this scheduler context\n");
194                 return -1;
195         }
196
197         if (!(st = ast_calloc(1, sizeof(*st)))) {
198                 return -1;
199         }
200
201         ast_cond_init(&st->cond, NULL);
202
203         st->thread = AST_PTHREADT_NULL;
204
205         con->sched_thread = st;
206
207         if (ast_pthread_create_background(&st->thread, NULL, sched_run, con)) {
208                 ast_log(LOG_ERROR, "Failed to create scheduler thread\n");
209                 sched_thread_destroy(con);
210                 return -1;
211         }
212
213         return 0;
214 }
215
216 static int sched_time_cmp(void *a, void *b)
217 {
218         return ast_tvcmp(((struct sched *) b)->when, ((struct sched *) a)->when);
219 }
220
/*!
 * \brief Allocate and initialize a new scheduler context.
 *
 * The context starts without a service thread; call
 * ast_sched_start_thread() to create one, or drive the context manually
 * with ast_sched_wait()/ast_sched_runq().
 *
 * \return The new context, or NULL on allocation failure.
 */
struct ast_sched_context *ast_sched_context_create(void)
{
	struct ast_sched_context *tmp;

	if (!(tmp = ast_calloc(1, sizeof(*tmp)))) {
		return NULL;
	}

	ast_mutex_init(&tmp->lock);
	tmp->eventcnt = 1;

	AST_LIST_HEAD_INIT_NOLOCK(&tmp->id_queue);

	if (!(tmp->sched_heap = ast_heap_create(8, sched_time_cmp,
			offsetof(struct sched, __heap_index)))) {
		/* Partially-constructed context is safe to destroy here: calloc
		 * zeroed the lists and the destroy path checks sched_heap for NULL. */
		ast_sched_context_destroy(tmp);
		return NULL;
	}

	return tmp;
}
242
243 static void sched_free(struct sched *task)
244 {
245         /* task->sched_id will be NULL most of the time, but when the
246          * scheduler context shuts down, it will free all scheduled
247          * tasks, and in that case, the task->sched_id will be non-NULL
248          */
249         ast_free(task->sched_id);
250         ast_cond_destroy(&task->cond);
251         ast_free(task);
252 }
253
/*!
 * \brief Tear down a scheduler context and free all its resources.
 *
 * Stops the service thread (if any), frees every cached and still-queued
 * task, drains the reusable ID queue, then destroys the lock and the
 * context itself.  Queued tasks are freed without their callbacks running.
 */
void ast_sched_context_destroy(struct ast_sched_context *con)
{
	struct sched *s;
	struct sched_id *sid;

	sched_thread_destroy(con);
	con->sched_thread = NULL;

	ast_mutex_lock(&con->lock);

#ifdef SCHED_MAX_CACHE
	/* Free the cache of unused task structures. */
	while ((s = AST_LIST_REMOVE_HEAD(&con->schedc, list))) {
		sched_free(s);
	}
#endif

	/* Free tasks still waiting in the heap; their sched_ids are freed
	 * along with them by sched_free(). */
	if (con->sched_heap) {
		while ((s = ast_heap_pop(con->sched_heap))) {
			sched_free(s);
		}
		ast_heap_destroy(con->sched_heap);
		con->sched_heap = NULL;
	}

	/* Drain the pool of reusable scheduler IDs. */
	while ((sid = AST_LIST_REMOVE_HEAD(&con->id_queue, list))) {
		ast_free(sid);
	}

	ast_mutex_unlock(&con->lock);
	ast_mutex_destroy(&con->lock);

	ast_free(con);
}
287
288 #define ID_QUEUE_INCREMENT 16
289
290 /*!
291  * \brief Add new scheduler IDs to the queue.
292  *
293  * \retval The number of IDs added to the queue
294  */
295 static int add_ids(struct ast_sched_context *con)
296 {
297         int new_size;
298         int original_size;
299         int i;
300
301         original_size = con->id_queue_size;
302         /* So we don't go overboard with the mallocs here, we'll just up
303          * the size of the list by a fixed amount each time instead of
304          * multiplying the size by any particular factor
305          */
306         new_size = original_size + ID_QUEUE_INCREMENT;
307         if (new_size < 0) {
308                 /* Overflow. Cap it at INT_MAX. */
309                 new_size = INT_MAX;
310         }
311         for (i = original_size; i < new_size; ++i) {
312                 struct sched_id *new_id;
313
314                 new_id = ast_calloc(1, sizeof(*new_id));
315                 if (!new_id) {
316                         break;
317                 }
318                 new_id->id = i;
319                 AST_LIST_INSERT_TAIL(&con->id_queue, new_id, list);
320                 ++con->id_queue_size;
321         }
322
323         return con->id_queue_size - original_size;
324 }
325
326 static int set_sched_id(struct ast_sched_context *con, struct sched *new_sched)
327 {
328         if (AST_LIST_EMPTY(&con->id_queue) && (add_ids(con) == 0)) {
329                 return -1;
330         }
331
332         new_sched->sched_id = AST_LIST_REMOVE_HEAD(&con->id_queue, list);
333         return 0;
334 }
335
/*!
 * \brief Return a task's ID to the queue and recycle or free the task.
 *
 * The task's sched_id (if any) goes back on the context's ID queue for
 * reuse by a future task.  The task structure itself is put in the
 * cache, or freed if the cache is full (or caching is compiled out).
 *
 * \note Must be called with the context locked.
 */
static void sched_release(struct ast_sched_context *con, struct sched *tmp)
{
	if (tmp->sched_id) {
		AST_LIST_INSERT_TAIL(&con->id_queue, tmp->sched_id, list);
		tmp->sched_id = NULL;
	}

	/*
	 * Add to the cache, or just free() if we
	 * already have too many cache entries
	 */
#ifdef SCHED_MAX_CACHE
	if (con->schedccnt < SCHED_MAX_CACHE) {
		AST_LIST_INSERT_HEAD(&con->schedc, tmp, list);
		con->schedccnt++;
	} else
#endif
		sched_free(tmp);
}
355
/*!
 * \brief Get a task structure, from the cache when possible, and assign
 * it a scheduler ID.
 *
 * \note Must be called with the context locked.
 *
 * \return The task, or NULL if allocating the task or its ID failed.
 */
static struct sched *sched_alloc(struct ast_sched_context *con)
{
	struct sched *tmp;

	/*
	 * We keep a small cache of schedule entries
	 * to minimize the number of necessary malloc()'s
	 */
#ifdef SCHED_MAX_CACHE
	if ((tmp = AST_LIST_REMOVE_HEAD(&con->schedc, list))) {
		con->schedccnt--;
	} else
#endif
	{
		tmp = ast_calloc(1, sizeof(*tmp));
		if (!tmp) {
			return NULL;
		}
		ast_cond_init(&tmp->cond, NULL);
	}

	if (set_sched_id(con, tmp)) {
		/* No ID available: put the structure back in the cache (or free it). */
		sched_release(con, tmp);
		return NULL;
	}

	return tmp;
}
384
/*!
 * \brief Remove every queued task whose callback equals 'match',
 * invoking cleanup_cb on each removed task's data before releasing it.
 *
 * \note The index 'i' is deliberately not advanced after a removal --
 * presumably because ast_heap_remove() restructures the heap so a
 * different element occupies index i on the next peek; confirm against
 * the ast_heap implementation.
 */
void ast_sched_clean_by_callback(struct ast_sched_context *con, ast_sched_cb match, ast_sched_cb cleanup_cb)
{
	int i = 1;
	struct sched *current;

	ast_mutex_lock(&con->lock);
	while ((current = ast_heap_peek(con->sched_heap, i))) {
		if (current->callback != match) {
			i++;
			continue;
		}

		ast_heap_remove(con->sched_heap, current);

		cleanup_cb(current->data);
		sched_release(con, current);
	}
	ast_mutex_unlock(&con->lock);
}
404
405 /*! \brief
406  * Return the number of milliseconds
407  * until the next scheduled event
408  */
409 int ast_sched_wait(struct ast_sched_context *con)
410 {
411         int ms;
412         struct sched *s;
413
414         DEBUG(ast_debug(1, "ast_sched_wait()\n"));
415
416         ast_mutex_lock(&con->lock);
417         if ((s = ast_heap_peek(con->sched_heap, 1))) {
418                 ms = ast_tvdiff_ms(s->when, ast_tvnow());
419                 if (ms < 0) {
420                         ms = 0;
421                 }
422         } else {
423                 ms = -1;
424         }
425         ast_mutex_unlock(&con->lock);
426
427         return ms;
428 }
429
430
431 /*! \brief
432  * Take a sched structure and put it in the
433  * queue, such that the soonest event is
434  * first in the list.
435  */
436 static void schedule(struct ast_sched_context *con, struct sched *s)
437 {
438         ast_heap_push(con->sched_heap, s);
439
440         if (ast_heap_size(con->sched_heap) > con->highwater) {
441                 con->highwater = ast_heap_size(con->sched_heap);
442         }
443 }
444
445 /*! \brief
446  * given the last event *tv and the offset in milliseconds 'when',
447  * computes the next value,
448  */
449 static int sched_settime(struct timeval *t, int when)
450 {
451         struct timeval now = ast_tvnow();
452
453         if (when < 0) {
454                 /*
455                  * A negative when value is likely a bug as it
456                  * represents a VERY large timeout time.
457                  */
458                 ast_log(LOG_WARNING,
459                         "Bug likely: Negative time interval %d (interpreted as %u ms) requested!\n",
460                         when, (unsigned int) when);
461                 ast_assert(0);
462         }
463
464         /*ast_debug(1, "TV -> %lu,%lu\n", tv->tv_sec, tv->tv_usec);*/
465         if (ast_tvzero(*t))     /* not supplied, default to now */
466                 *t = now;
467         *t = ast_tvadd(*t, ast_samp2tv(when, 1000));
468         if (ast_tvcmp(*t, now) < 0) {
469                 *t = now;
470         }
471         return 0;
472 }
473
/*!
 * \brief Delete entry 'old_id' (when > 0) and schedule a replacement
 * with variable rescheduling semantics.
 *
 * NOTE(review): this treats 0 as "new item, nothing to delete", while
 * ast_sched_replace() below deletes any old_id > -1, and the ID queue
 * hands out 0 as a valid scheduler ID (add_ids() numbers from 0) --
 * confirm which convention callers rely on before unifying.
 *
 * \return The new schedule ID, or -1 on failure.
 */
int ast_sched_replace_variable(int old_id, struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data, int variable)
{
	/* 0 means the schedule item is new; do not delete */
	if (old_id > 0) {
		AST_SCHED_DEL(con, old_id);
	}
	return ast_sched_add_variable(con, when, callback, data, variable);
}
482
/*! \brief
 * Schedule callback(data) to happen when ms into the future
 *
 * \param con      Scheduler context.
 * \param when     Milliseconds until the callback fires (also the fixed
 *                 reschedule interval when 'variable' is zero).
 * \param callback Function to invoke; its return value controls
 *                 rescheduling (see ast_sched_runq()).
 * \param data     Opaque pointer handed to the callback.
 * \param variable If non-zero, reschedule using the callback's return
 *                 value instead of the original 'when' interval.
 *
 * \return The new entry's scheduler ID, or -1 on failure.
 */
int ast_sched_add_variable(struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data, int variable)
{
	struct sched *tmp;
	int res = -1;

	DEBUG(ast_debug(1, "ast_sched_add()\n"));

	ast_mutex_lock(&con->lock);
	if ((tmp = sched_alloc(con))) {
		con->eventcnt++;
		tmp->callback = callback;
		tmp->data = data;
		tmp->resched = when;
		tmp->variable = variable;
		/* Zero base time means "relative to now" in sched_settime(). */
		tmp->when = ast_tv(0, 0);
		tmp->deleted = 0;
		if (sched_settime(&tmp->when, when)) {
			sched_release(con, tmp);
		} else {
			schedule(con, tmp);
			res = tmp->sched_id->id;
		}
	}
#ifdef DUMP_SCHEDULER
	/* Dump contents of the context while we have the lock so nothing gets screwed up by accident. */
	if (option_debug)
		ast_sched_dump(con);
#endif
	/* Wake the service thread so it re-evaluates its wait time. */
	if (con->sched_thread) {
		ast_cond_signal(&con->sched_thread->cond);
	}
	ast_mutex_unlock(&con->lock);

	return res;
}
521
522 int ast_sched_replace(int old_id, struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data)
523 {
524         if (old_id > -1) {
525                 AST_SCHED_DEL(con, old_id);
526         }
527         return ast_sched_add(con, when, callback, data);
528 }
529
/*!
 * \brief Schedule callback(data) 'when' ms into the future with a fixed
 * reschedule interval (thin wrapper over ast_sched_add_variable()).
 *
 * \return The new entry's scheduler ID, or -1 on failure.
 */
int ast_sched_add(struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data)
{
	return ast_sched_add_variable(con, when, callback, data, 0);
}
534
535 static struct sched *sched_find(struct ast_sched_context *con, int id)
536 {
537         int x;
538         size_t heap_size;
539
540         heap_size = ast_heap_size(con->sched_heap);
541         for (x = 1; x <= heap_size; x++) {
542                 struct sched *cur = ast_heap_peek(con->sched_heap, x);
543
544                 if (cur->sched_id->id == id) {
545                         return cur;
546                 }
547         }
548
549         return NULL;
550 }
551
552 const void *ast_sched_find_data(struct ast_sched_context *con, int id)
553 {
554         struct sched *s;
555         const void *data = NULL;
556
557         ast_mutex_lock(&con->lock);
558
559         s = sched_find(con, id);
560         if (s) {
561                 data = s->data;
562         }
563
564         ast_mutex_unlock(&con->lock);
565
566         return data;
567 }
568
569 /*! \brief
570  * Delete the schedule entry with number
571  * "id".  It's nearly impossible that there
572  * would be two or more in the list with that
573  * id.
574  */
575 #ifndef AST_DEVMODE
576 int ast_sched_del(struct ast_sched_context *con, int id)
577 #else
578 int _ast_sched_del(struct ast_sched_context *con, int id, const char *file, int line, const char *function)
579 #endif
580 {
581         struct sched *s = NULL;
582         int *last_id = ast_threadstorage_get(&last_del_id, sizeof(int));
583
584         DEBUG(ast_debug(1, "ast_sched_del(%d)\n", id));
585
586         if (id < 0) {
587                 return 0;
588         }
589
590         ast_mutex_lock(&con->lock);
591
592         s = sched_find(con, id);
593         if (s) {
594                 if (!ast_heap_remove(con->sched_heap, s)) {
595                         ast_log(LOG_WARNING,"sched entry %d not in the sched heap?\n", s->sched_id->id);
596                 }
597                 sched_release(con, s);
598         } else if (con->currently_executing && (id == con->currently_executing->sched_id->id)) {
599                 s = con->currently_executing;
600                 s->deleted = 1;
601                 /* Wait for executing task to complete so that caller of ast_sched_del() does not
602                  * free memory out from under the task.
603                  */
604                 while (con->currently_executing && (id == con->currently_executing->sched_id->id)) {
605                         ast_cond_wait(&s->cond, &con->lock);
606                 }
607                 /* Do not sched_release() here because ast_sched_runq() will do it */
608         }
609
610 #ifdef DUMP_SCHEDULER
611         /* Dump contents of the context while we have the lock so nothing gets screwed up by accident. */
612         if (option_debug)
613                 ast_sched_dump(con);
614 #endif
615         if (con->sched_thread) {
616                 ast_cond_signal(&con->sched_thread->cond);
617         }
618         ast_mutex_unlock(&con->lock);
619
620         if (!s && *last_id != id) {
621                 ast_debug(1, "Attempted to delete nonexistent schedule entry %d!\n", id);
622                 /* Removing nonexistent schedule entry shouldn't trigger assert (it was enabled in DEV_MODE);
623                  * because in many places entries is deleted without having valid id. */
624                 *last_id = id;
625                 return -1;
626         } else if (!s) {
627                 return -1;
628         }
629
630         return 0;
631 }
632
633 void ast_sched_report(struct ast_sched_context *con, struct ast_str **buf, struct ast_cb_names *cbnames)
634 {
635         int i, x;
636         struct sched *cur;
637         int countlist[cbnames->numassocs + 1];
638         size_t heap_size;
639
640         memset(countlist, 0, sizeof(countlist));
641         ast_str_set(buf, 0, " Highwater = %u\n schedcnt = %zu\n", con->highwater, ast_heap_size(con->sched_heap));
642
643         ast_mutex_lock(&con->lock);
644
645         heap_size = ast_heap_size(con->sched_heap);
646         for (x = 1; x <= heap_size; x++) {
647                 cur = ast_heap_peek(con->sched_heap, x);
648                 /* match the callback to the cblist */
649                 for (i = 0; i < cbnames->numassocs; i++) {
650                         if (cur->callback == cbnames->cblist[i]) {
651                                 break;
652                         }
653                 }
654                 if (i < cbnames->numassocs) {
655                         countlist[i]++;
656                 } else {
657                         countlist[cbnames->numassocs]++;
658                 }
659         }
660
661         ast_mutex_unlock(&con->lock);
662
663         for (i = 0; i < cbnames->numassocs; i++) {
664                 ast_str_append(buf, 0, "    %s : %d\n", cbnames->list[i], countlist[i]);
665         }
666
667         ast_str_append(buf, 0, "   <unknown> : %d\n", countlist[cbnames->numassocs]);
668 }
669
/*! \brief Dump the contents of the scheduler to LOG_DEBUG */
void ast_sched_dump(struct ast_sched_context *con)
{
	struct sched *q;
	struct timeval when = ast_tvnow();
	int x;
	size_t heap_size;
	/* NOTE(review): the summary line below reads the heap size and
	 * counters before taking con->lock -- possibly because this is also
	 * called with the lock already held (see the DUMP_SCHEDULER paths in
	 * ast_sched_add_variable()/ast_sched_del()); confirm whether the
	 * mutex is recursive before changing the locking here. */
#ifdef SCHED_MAX_CACHE
	ast_debug(1, "Asterisk Schedule Dump (%zu in Q, %u Total, %u Cache, %u high-water)\n", ast_heap_size(con->sched_heap), con->eventcnt - 1, con->schedccnt, con->highwater);
#else
	ast_debug(1, "Asterisk Schedule Dump (%zu in Q, %u Total, %u high-water)\n", ast_heap_size(con->sched_heap), con->eventcnt - 1, con->highwater);
#endif

	ast_debug(1, "=============================================================\n");
	ast_debug(1, "|ID    Callback          Data              Time  (sec:ms)   |\n");
	ast_debug(1, "+-----+-----------------+-----------------+-----------------+\n");
	ast_mutex_lock(&con->lock);
	heap_size = ast_heap_size(con->sched_heap);
	for (x = 1; x <= heap_size; x++) {
		struct timeval delta;
		q = ast_heap_peek(con->sched_heap, x);
		/* Time remaining, relative to when the dump started. */
		delta = ast_tvsub(q->when, when);
		ast_debug(1, "|%.4d | %-15p | %-15p | %.6ld : %.6ld |\n",
			q->sched_id->id,
			q->callback,
			q->data,
			(long)delta.tv_sec,
			(long int)delta.tv_usec);
	}
	ast_mutex_unlock(&con->lock);
	ast_debug(1, "=============================================================\n");
}
702
/*! \brief
 * Launch all events which need to be run at this time.
 *
 * \return The number of events executed.
 */
int ast_sched_runq(struct ast_sched_context *con)
{
	struct sched *current;
	struct timeval when;
	int numevents;
	int res;

	DEBUG(ast_debug(1, "ast_sched_runq()\n"));

	ast_mutex_lock(&con->lock);

	when = ast_tvadd(ast_tvnow(), ast_tv(0, 1000));
	for (numevents = 0; (current = ast_heap_peek(con->sched_heap, 1)); numevents++) {
		/* schedule all events which are going to expire within 1ms.
		 * We only care about millisecond accuracy anyway, so this will
		 * help us get more than one event at one time if they are very
		 * close together.
		 */
		if (ast_tvcmp(current->when, when) != -1) {
			break;
		}

		current = ast_heap_pop(con->sched_heap);

		/*
		 * At this point, the schedule queue is still intact.  We
		 * have removed the first event and the rest is still there,
		 * so it's permissible for the callback to add new events, but
		 * trying to delete itself won't work because it isn't in
		 * the schedule queue.  If that's what it wants to do, it
		 * should return 0.
		 */

		/* Publish the running task and drop the lock so the callback can
		 * use the scheduler itself, and so ast_sched_del() can detect the
		 * in-flight task and wait for it. */
		con->currently_executing = current;
		ast_mutex_unlock(&con->lock);
		res = current->callback(current->data);
		ast_mutex_lock(&con->lock);
		con->currently_executing = NULL;
		/* Wake any ast_sched_del() caller blocked on this task. */
		ast_cond_signal(&current->cond);

		if (res && !current->deleted) {
			/*
			 * If they return non-zero, we should schedule them to be
			 * run again.  'variable' tasks reschedule by the callback's
			 * return value; otherwise the original interval is reused.
			 */
			if (sched_settime(&current->when, current->variable? res : current->resched)) {
				sched_release(con, current);
			} else {
				schedule(con, current);
			}
		} else {
			/* No longer needed, so release it */
			sched_release(con, current);
		}
	}

	ast_mutex_unlock(&con->lock);

	return numevents;
}
766
767 long ast_sched_when(struct ast_sched_context *con,int id)
768 {
769         struct sched *s;
770         long secs = -1;
771         DEBUG(ast_debug(1, "ast_sched_when()\n"));
772
773         ast_mutex_lock(&con->lock);
774
775         s = sched_find(con, id);
776         if (s) {
777                 struct timeval now = ast_tvnow();
778                 secs = s->when.tv_sec - now.tv_sec;
779         }
780
781         ast_mutex_unlock(&con->lock);
782
783         return secs;
784 }