sched: ast_sched_del may return prematurely due to spurious wakeup
[asterisk/asterisk.git] / main / sched.c
/*
 * Asterisk -- An open source telephony toolkit.
 *
 * Copyright (C) 1999 - 2010, Digium, Inc.
 *
 * Mark Spencer <markster@digium.com>
 * Russell Bryant <russell@digium.com>
 *
 * See http://www.asterisk.org for more information about
 * the Asterisk project. Please do not directly contact
 * any of the maintainers of this project for assistance;
 * the project provides a web site, mailing lists and IRC
 * channels for your use.
 *
 * This program is free software, distributed under the terms of
 * the GNU General Public License Version 2. See the LICENSE file
 * at the top of the source tree.
 */

/*! \file
 *
 * \brief Scheduler Routines (from cheops-NG)
 *
 * \author Mark Spencer <markster@digium.com>
 */

/*** MODULEINFO
        <support_level>core</support_level>
 ***/

#include "asterisk.h"

ASTERISK_REGISTER_FILE()

#ifdef DEBUG_SCHEDULER
#define DEBUG(a) do { \
        if (option_debug) \
                DEBUG_M(a) \
        } while (0)
#else
#define DEBUG(a)
#endif

#include <sys/time.h>

#include "asterisk/sched.h"
#include "asterisk/channel.h"
#include "asterisk/lock.h"
#include "asterisk/utils.h"
#include "asterisk/heap.h"
#include "asterisk/threadstorage.h"

/*!
 * \brief Max num of schedule structs
 *
 * \note The max number of schedule structs to keep around
 * for use.  Undefine to disable schedule structure
 * caching. (Only disable this on very low memory
 * machines)
 */
#define SCHED_MAX_CACHE 128

AST_THREADSTORAGE(last_del_id);

struct sched {
        AST_LIST_ENTRY(sched) list;
        int id;                       /*!< ID number of event */
        struct timeval when;          /*!< Absolute time event should take place */
        int resched;                  /*!< When to reschedule */
        int variable;                 /*!< Use return value from callback to reschedule */
        const void *data;             /*!< Data */
        ast_sched_cb callback;        /*!< Callback */
        ssize_t __heap_index;
        /*!
         * Used to synchronize between thread running a task and thread
         * attempting to delete a task
         */
        ast_cond_t cond;
        /*! Indication that a running task was deleted. */
        unsigned int deleted:1;
};

struct sched_thread {
        pthread_t thread;
        ast_cond_t cond;
        unsigned int stop:1;
};

struct ast_sched_context {
        ast_mutex_t lock;
        unsigned int eventcnt;                  /*!< Number of events processed */
        unsigned int highwater;                 /*!< Highest count so far */
        struct ast_heap *sched_heap;
        struct sched_thread *sched_thread;
        /*! The scheduled task that is currently executing */
        struct sched *currently_executing;

#ifdef SCHED_MAX_CACHE
        AST_LIST_HEAD_NOLOCK(, sched) schedc;   /*!< Cache of unused schedule structures and how many */
        unsigned int schedccnt;
#endif
};

static void *sched_run(void *data)
{
        struct ast_sched_context *con = data;

        while (!con->sched_thread->stop) {
                int ms;
                struct timespec ts = {
                        .tv_sec = 0,
                };

                ast_mutex_lock(&con->lock);

                if (con->sched_thread->stop) {
                        ast_mutex_unlock(&con->lock);
                        return NULL;
                }

                ms = ast_sched_wait(con);

                if (ms == -1) {
                        ast_cond_wait(&con->sched_thread->cond, &con->lock);
                } else {
                        struct timeval tv;
                        tv = ast_tvadd(ast_tvnow(), ast_samp2tv(ms, 1000));
                        ts.tv_sec = tv.tv_sec;
                        ts.tv_nsec = tv.tv_usec * 1000;
                        ast_cond_timedwait(&con->sched_thread->cond, &con->lock, &ts);
                }

                ast_mutex_unlock(&con->lock);

                if (con->sched_thread->stop) {
                        return NULL;
                }

                ast_sched_runq(con);
        }

        return NULL;
}

static void sched_thread_destroy(struct ast_sched_context *con)
{
        if (!con->sched_thread) {
                return;
        }

        if (con->sched_thread->thread != AST_PTHREADT_NULL) {
                ast_mutex_lock(&con->lock);
                con->sched_thread->stop = 1;
                ast_cond_signal(&con->sched_thread->cond);
                ast_mutex_unlock(&con->lock);
                pthread_join(con->sched_thread->thread, NULL);
                con->sched_thread->thread = AST_PTHREADT_NULL;
        }

        ast_cond_destroy(&con->sched_thread->cond);

        ast_free(con->sched_thread);

        con->sched_thread = NULL;
}

int ast_sched_start_thread(struct ast_sched_context *con)
{
        struct sched_thread *st;

        if (con->sched_thread) {
                ast_log(LOG_ERROR, "Thread already started on this scheduler context\n");
                return -1;
        }

        if (!(st = ast_calloc(1, sizeof(*st)))) {
                return -1;
        }

        ast_cond_init(&st->cond, NULL);

        st->thread = AST_PTHREADT_NULL;

        con->sched_thread = st;

        if (ast_pthread_create_background(&st->thread, NULL, sched_run, con)) {
                ast_log(LOG_ERROR, "Failed to create scheduler thread\n");
                sched_thread_destroy(con);
                return -1;
        }

        return 0;
}
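
/*!
 * \par Example
 * A minimal lifecycle sketch (illustrative, not part of the original file):
 * create a context, hand it to a background thread, queue a one-shot task,
 * and tear everything down.  The callback my_task_cb and its data string are
 * hypothetical.
 * \code
 * static int my_task_cb(const void *data)
 * {
 *         ast_log(LOG_NOTICE, "Task fired: %s\n", (const char *) data);
 *         return 0;        // one-shot: do not reschedule
 * }
 *
 * static int start_example(void)
 * {
 *         struct ast_sched_context *con = ast_sched_context_create();
 *
 *         if (!con) {
 *                 return -1;
 *         }
 *         if (ast_sched_start_thread(con)) {
 *                 ast_sched_context_destroy(con);
 *                 return -1;
 *         }
 *         ast_sched_add(con, 5000, my_task_cb, "hello");  // fire in about 5 seconds
 *         return 0;
 * }
 * \endcode
 * Calling ast_sched_context_destroy() later stops the thread and frees any
 * pending entries.
 */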

static int sched_time_cmp(void *a, void *b)
{
        return ast_tvcmp(((struct sched *) b)->when, ((struct sched *) a)->when);
}

struct ast_sched_context *ast_sched_context_create(void)
{
        struct ast_sched_context *tmp;

        if (!(tmp = ast_calloc(1, sizeof(*tmp)))) {
                return NULL;
        }

        ast_mutex_init(&tmp->lock);
        tmp->eventcnt = 1;

        if (!(tmp->sched_heap = ast_heap_create(8, sched_time_cmp,
                        offsetof(struct sched, __heap_index)))) {
                ast_sched_context_destroy(tmp);
                return NULL;
        }

        return tmp;
}

static void sched_free(struct sched *task)
{
        ast_cond_destroy(&task->cond);
        ast_free(task);
}

void ast_sched_context_destroy(struct ast_sched_context *con)
{
        struct sched *s;

        sched_thread_destroy(con);
        con->sched_thread = NULL;

        ast_mutex_lock(&con->lock);

#ifdef SCHED_MAX_CACHE
        while ((s = AST_LIST_REMOVE_HEAD(&con->schedc, list))) {
                sched_free(s);
        }
#endif

        if (con->sched_heap) {
                while ((s = ast_heap_pop(con->sched_heap))) {
                        sched_free(s);
                }
                ast_heap_destroy(con->sched_heap);
                con->sched_heap = NULL;
        }

        ast_mutex_unlock(&con->lock);
        ast_mutex_destroy(&con->lock);

        ast_free(con);
}

static struct sched *sched_alloc(struct ast_sched_context *con)
{
        struct sched *tmp;

        /*
         * We keep a small cache of schedule entries
         * to minimize the number of necessary malloc()'s
         */
#ifdef SCHED_MAX_CACHE
        if ((tmp = AST_LIST_REMOVE_HEAD(&con->schedc, list))) {
                con->schedccnt--;
        } else
#endif
        {
                tmp = ast_calloc(1, sizeof(*tmp));
                if (!tmp) {
                        return NULL;
                }
                ast_cond_init(&tmp->cond, NULL);
        }

        return tmp;
}

static void sched_release(struct ast_sched_context *con, struct sched *tmp)
{
        /*
         * Add to the cache, or just free() if we
         * already have too many cache entries
         */

#ifdef SCHED_MAX_CACHE
        if (con->schedccnt < SCHED_MAX_CACHE) {
                AST_LIST_INSERT_HEAD(&con->schedc, tmp, list);
                con->schedccnt++;
        } else
#endif
                sched_free(tmp);
}

void ast_sched_clean_by_callback(struct ast_sched_context *con, ast_sched_cb match, ast_sched_cb cleanup_cb)
{
        int i = 1;
        struct sched *current;

        ast_mutex_lock(&con->lock);
        while ((current = ast_heap_peek(con->sched_heap, i))) {
                if (current->callback != match) {
                        i++;
                        continue;
                }

                ast_heap_remove(con->sched_heap, current);

                cleanup_cb(current->data);
                sched_release(con, current);
        }
        ast_mutex_unlock(&con->lock);
}
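
/*!
 * \par Example
 * A hedged sketch of flushing every pending entry that was queued with a
 * particular callback, for instance at module unload.  The names con,
 * my_task_cb and free_task_data are hypothetical; cleanup_cb receives each
 * removed entry's data pointer.
 * \code
 * static int free_task_data(const void *data)
 * {
 *         ast_free((void *) data);       // data was allocated by the caller
 *         return 0;
 * }
 *
 * static void flush_my_tasks(struct ast_sched_context *con)
 * {
 *         ast_sched_clean_by_callback(con, my_task_cb, free_task_data);
 * }
 * \endcode
 */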

/*! \brief
 * Return the number of milliseconds
 * until the next scheduled event
 */
int ast_sched_wait(struct ast_sched_context *con)
{
        int ms;
        struct sched *s;

        DEBUG(ast_debug(1, "ast_sched_wait()\n"));

        ast_mutex_lock(&con->lock);
        if ((s = ast_heap_peek(con->sched_heap, 1))) {
                ms = ast_tvdiff_ms(s->when, ast_tvnow());
                if (ms < 0) {
                        ms = 0;
                }
        } else {
                ms = -1;
        }
        ast_mutex_unlock(&con->lock);

        return ms;
}
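
/*!
 * \par Example
 * Callers that do not use ast_sched_start_thread() typically drive the
 * context from their own loop: ask how long until the next event, sleep or
 * poll for at most that long, then run the queue.  This sketch mirrors what
 * sched_run() does above; the bare poll() call (used purely as a sleep, from
 * <poll.h>) and the 1000 ms idle cap are illustrative choices.
 * \code
 * for (;;) {
 *         int ms = ast_sched_wait(con);  // -1 means nothing is scheduled
 *
 *         poll(NULL, 0, ms < 0 ? 1000 : ms);
 *         ast_sched_runq(con);
 * }
 * \endcode
 */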

/*! \brief
 * Take a sched structure and put it in the
 * queue, such that the soonest event is
 * first in the list.
 */
static void schedule(struct ast_sched_context *con, struct sched *s)
{
        ast_heap_push(con->sched_heap, s);

        if (ast_heap_size(con->sched_heap) > con->highwater) {
                con->highwater = ast_heap_size(con->sched_heap);
        }
}

/*! \brief
 * Given the last event time *t and the offset in milliseconds 'when',
 * compute the next absolute run time.
 */
static int sched_settime(struct timeval *t, int when)
{
        struct timeval now = ast_tvnow();

        /*ast_debug(1, "TV -> %lu,%lu\n", tv->tv_sec, tv->tv_usec);*/
        if (ast_tvzero(*t))     /* not supplied, default to now */
                *t = now;
        *t = ast_tvadd(*t, ast_samp2tv(when, 1000));
        if (ast_tvcmp(*t, now) < 0) {
                *t = now;
        }
        return 0;
}
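
/*
 * Worked example (illustrative): if a task last ran at t = 10.000 s and is
 * rescheduled with when = 1000 ms, the next run time becomes 11.000 s.  If the
 * system fell behind and "now" is already 11.250 s, the result is clamped to
 * 11.250 s so the entry is never scheduled in the past.
 */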

int ast_sched_replace_variable(int old_id, struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data, int variable)
{
        /* 0 means the schedule item is new; do not delete */
        if (old_id > 0) {
                AST_SCHED_DEL(con, old_id);
        }
        return ast_sched_add_variable(con, when, callback, data, variable);
}

/*! \brief
 * Schedule callback(data) to happen when ms into the future
 */
int ast_sched_add_variable(struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data, int variable)
{
        struct sched *tmp;
        int res = -1;

        DEBUG(ast_debug(1, "ast_sched_add()\n"));

        ast_mutex_lock(&con->lock);
        if ((tmp = sched_alloc(con))) {
                tmp->id = con->eventcnt++;
                tmp->callback = callback;
                tmp->data = data;
                tmp->resched = when;
                tmp->variable = variable;
                tmp->when = ast_tv(0, 0);
                tmp->deleted = 0;
                if (sched_settime(&tmp->when, when)) {
                        sched_release(con, tmp);
                } else {
                        schedule(con, tmp);
                        res = tmp->id;
                }
        }
#ifdef DUMP_SCHEDULER
        /* Dump contents of the context while we have the lock so nothing gets screwed up by accident. */
        if (option_debug)
                ast_sched_dump(con);
#endif
        if (con->sched_thread) {
                ast_cond_signal(&con->sched_thread->cond);
        }
        ast_mutex_unlock(&con->lock);

        return res;
}
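
/*!
 * \par Example
 * When 'variable' is non-zero, a non-zero return value from the callback is
 * used as the next interval in milliseconds, so a task can adjust its own
 * cadence.  A hedged sketch of an exponential backoff; retry_state, try_once
 * and the interval choices are hypothetical.
 * \code
 * static int backoff_cb(const void *data)
 * {
 *         struct retry_state *st = (struct retry_state *) data;
 *
 *         if (try_once(st)) {
 *                 return 0;              // success: stop rescheduling
 *         }
 *         st->interval_ms *= 2;          // failed: try again after twice the delay
 *         return st->interval_ms;        // next run in this many milliseconds
 * }
 *
 * // First attempt after 500 ms; later attempts follow the returned values.
 * int id = ast_sched_add_variable(con, 500, backoff_cb, st, 1);
 * \endcode
 */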

int ast_sched_replace(int old_id, struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data)
{
        if (old_id > -1) {
                AST_SCHED_DEL(con, old_id);
        }
        return ast_sched_add(con, when, callback, data);
}

int ast_sched_add(struct ast_sched_context *con, int when, ast_sched_cb callback, const void *data)
{
        return ast_sched_add_variable(con, when, callback, data, 0);
}
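
/*!
 * \par Example
 * With ast_sched_add(), returning non-zero from the callback re-queues the
 * task with the original 'when' interval, which is the usual way to get a
 * fixed-period timer.  Sketch only; poll_peer_cb, send_keepalive and peer are
 * hypothetical.
 * \code
 * static int poll_peer_cb(const void *data)
 * {
 *         struct peer *p = (struct peer *) data;
 *
 *         send_keepalive(p);
 *         return 1;        // non-zero: run again in another 30 seconds
 * }
 *
 * int id = ast_sched_add(con, 30000, poll_peer_cb, p);
 * \endcode
 */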

static struct sched *sched_find(struct ast_sched_context *con, int id)
{
        int x;
        size_t heap_size;

        heap_size = ast_heap_size(con->sched_heap);
        for (x = 1; x <= heap_size; x++) {
                struct sched *cur = ast_heap_peek(con->sched_heap, x);

                if (cur->id == id) {
                        return cur;
                }
        }

        return NULL;
}

const void *ast_sched_find_data(struct ast_sched_context *con, int id)
{
        struct sched *s;
        const void *data = NULL;

        ast_mutex_lock(&con->lock);

        s = sched_find(con, id);
        if (s) {
                data = s->data;
        }

        ast_mutex_unlock(&con->lock);

        return data;
}
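
/*!
 * \par Example
 * A small sketch: if the id is still pending, retrieve the data pointer it
 * was scheduled with (the entry stays queued; nothing is removed).  A NULL
 * result only proves the entry is gone if non-NULL data was supplied when it
 * was added.
 * \code
 * const void *data = ast_sched_find_data(con, id);
 *
 * if (data) {
 *         ast_debug(1, "Task %d is still queued\n", id);
 * }
 * \endcode
 */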

/*! \brief
 * Delete the schedule entry with number
 * "id".  It's nearly impossible that there
 * would be two or more in the list with that
 * id.
 */
#ifndef AST_DEVMODE
int ast_sched_del(struct ast_sched_context *con, int id)
#else
int _ast_sched_del(struct ast_sched_context *con, int id, const char *file, int line, const char *function)
#endif
{
        struct sched *s = NULL;
        int *last_id = ast_threadstorage_get(&last_del_id, sizeof(int));

        DEBUG(ast_debug(1, "ast_sched_del(%d)\n", id));

        if (id < 0) {
                return 0;
        }

        ast_mutex_lock(&con->lock);

        s = sched_find(con, id);
        if (s) {
                if (!ast_heap_remove(con->sched_heap, s)) {
                        ast_log(LOG_WARNING, "sched entry %d not in the sched heap?\n", s->id);
                }
                sched_release(con, s);
        } else if (con->currently_executing && (id == con->currently_executing->id)) {
                s = con->currently_executing;
                s->deleted = 1;
                /* Wait for the executing task to complete so that the caller of
                 * ast_sched_del() does not free memory out from under the task.
                 * Loop to guard against spurious wakeups of the condition variable.
                 */
                while (con->currently_executing && (id == con->currently_executing->id)) {
                        ast_cond_wait(&s->cond, &con->lock);
                }
                /* Do not sched_release() here because ast_sched_runq() will do it */
        }

#ifdef DUMP_SCHEDULER
        /* Dump contents of the context while we have the lock so nothing gets screwed up by accident. */
        if (option_debug)
                ast_sched_dump(con);
#endif
        if (con->sched_thread) {
                ast_cond_signal(&con->sched_thread->cond);
        }
        ast_mutex_unlock(&con->lock);

        if (!s && *last_id != id) {
                ast_debug(1, "Attempted to delete nonexistent schedule entry %d!\n", id);
                /* Removing a nonexistent schedule entry shouldn't trigger an assert (it was
                 * enabled in DEV_MODE), because in many places entries are deleted without
                 * having a valid id. */
                *last_id = id;
                return -1;
        } else if (!s) {
                return -1;
        }

        return 0;
}
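
/*!
 * \par Example
 * Cancelling a task from another thread.  If the callback happens to be
 * executing, ast_sched_del() blocks until it finishes, so the caller may free
 * the task's data afterwards without racing the callback.  A callback must
 * not try to delete itself; it should return 0 instead (see the comments in
 * ast_sched_runq() below).  The AST_SCHED_DEL() convenience macro from
 * sched.h wraps this call and also resets the stored id.  Sketch only;
 * task_data is hypothetical.
 * \code
 * if (!ast_sched_del(con, id)) {
 *         // The entry was removed and is guaranteed not to be running,
 *         // so its data can be released now.
 *         ast_free(task_data);
 * }
 * id = -1;
 * \endcode
 */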

void ast_sched_report(struct ast_sched_context *con, struct ast_str **buf, struct ast_cb_names *cbnames)
{
        int i, x;
        struct sched *cur;
        int countlist[cbnames->numassocs + 1];
        size_t heap_size;

        memset(countlist, 0, sizeof(countlist));
        ast_str_set(buf, 0, " Highwater = %u\n schedcnt = %zu\n", con->highwater, ast_heap_size(con->sched_heap));

        ast_mutex_lock(&con->lock);

        heap_size = ast_heap_size(con->sched_heap);
        for (x = 1; x <= heap_size; x++) {
                cur = ast_heap_peek(con->sched_heap, x);
                /* match the callback to the cblist */
                for (i = 0; i < cbnames->numassocs; i++) {
                        if (cur->callback == cbnames->cblist[i]) {
                                break;
                        }
                }
                if (i < cbnames->numassocs) {
                        countlist[i]++;
                } else {
                        countlist[cbnames->numassocs]++;
                }
        }

        ast_mutex_unlock(&con->lock);

        for (i = 0; i < cbnames->numassocs; i++) {
                ast_str_append(buf, 0, "    %s : %d\n", cbnames->list[i], countlist[i]);
        }

        ast_str_append(buf, 0, "   <unknown> : %d\n", countlist[cbnames->numassocs]);
}
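
/*!
 * \par Example
 * A hedged sketch of producing a per-callback report, assuming the
 * ast_cb_names layout used above (numassocs plus the parallel list/cblist
 * arrays) and two hypothetical callbacks.
 * \code
 * struct ast_cb_names cbnames = {
 *         .numassocs = 2,
 *         .list = { "SIP poke", "RTP timeout" },
 *         .cblist = { sip_poke_cb, rtp_timeout_cb },
 * };
 * struct ast_str *buf = ast_str_create(256);
 *
 * if (buf) {
 *         ast_sched_report(con, &buf, &cbnames);
 *         ast_verbose("%s", ast_str_buffer(buf));
 *         ast_free(buf);
 * }
 * \endcode
 */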

/*! \brief Dump the contents of the scheduler to LOG_DEBUG */
void ast_sched_dump(struct ast_sched_context *con)
{
        struct sched *q;
        struct timeval when = ast_tvnow();
        int x;
        size_t heap_size;
#ifdef SCHED_MAX_CACHE
        ast_debug(1, "Asterisk Schedule Dump (%zu in Q, %u Total, %u Cache, %u high-water)\n", ast_heap_size(con->sched_heap), con->eventcnt - 1, con->schedccnt, con->highwater);
#else
        ast_debug(1, "Asterisk Schedule Dump (%zu in Q, %u Total, %u high-water)\n", ast_heap_size(con->sched_heap), con->eventcnt - 1, con->highwater);
#endif

        ast_debug(1, "=============================================================\n");
        ast_debug(1, "|ID    Callback          Data              Time  (sec:ms)   |\n");
        ast_debug(1, "+-----+-----------------+-----------------+-----------------+\n");
        ast_mutex_lock(&con->lock);
        heap_size = ast_heap_size(con->sched_heap);
        for (x = 1; x <= heap_size; x++) {
                struct timeval delta;
                q = ast_heap_peek(con->sched_heap, x);
                delta = ast_tvsub(q->when, when);
                ast_debug(1, "|%.4d | %-15p | %-15p | %.6ld : %.6ld |\n",
                        q->id,
                        q->callback,
                        q->data,
                        (long)delta.tv_sec,
                        (long int)delta.tv_usec);
        }
        ast_mutex_unlock(&con->lock);
        ast_debug(1, "=============================================================\n");
}

/*! \brief
 * Launch all events which need to be run at this time.
 */
int ast_sched_runq(struct ast_sched_context *con)
{
        struct sched *current;
        struct timeval when;
        int numevents;
        int res;

        DEBUG(ast_debug(1, "ast_sched_runq()\n"));

        ast_mutex_lock(&con->lock);

        when = ast_tvadd(ast_tvnow(), ast_tv(0, 1000));
        for (numevents = 0; (current = ast_heap_peek(con->sched_heap, 1)); numevents++) {
                /* schedule all events which are going to expire within 1ms.
                 * We only care about millisecond accuracy anyway, so this will
                 * help us get more than one event at one time if they are very
                 * close together.
                 */
                if (ast_tvcmp(current->when, when) != -1) {
                        break;
                }

                current = ast_heap_pop(con->sched_heap);

                /*
                 * At this point, the schedule queue is still intact.  We
                 * have removed the first event and the rest is still there,
                 * so it's permissible for the callback to add new events, but
                 * trying to delete itself won't work because it isn't in
                 * the schedule queue.  If that's what it wants to do, it
                 * should return 0.
                 */

                con->currently_executing = current;
                ast_mutex_unlock(&con->lock);
                res = current->callback(current->data);
                ast_mutex_lock(&con->lock);
                con->currently_executing = NULL;
                ast_cond_signal(&current->cond);

                if (res && !current->deleted) {
                        /*
                         * If they return non-zero, we should schedule them to be
                         * run again.
                         */
                        if (sched_settime(&current->when, current->variable ? res : current->resched)) {
                                sched_release(con, current);
                        } else {
                                schedule(con, current);
                        }
                } else {
                        /* No longer needed, so release it */
                        sched_release(con, current);
                }
        }

        ast_mutex_unlock(&con->lock);

        return numevents;
}

long ast_sched_when(struct ast_sched_context *con, int id)
{
        struct sched *s;
        long secs = -1;

        DEBUG(ast_debug(1, "ast_sched_when()\n"));

        ast_mutex_lock(&con->lock);

        s = sched_find(con, id);
        if (s) {
                struct timeval now = ast_tvnow();
                secs = s->when.tv_sec - now.tv_sec;
        }

        ast_mutex_unlock(&con->lock);

        return secs;
}
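
/*!
 * \par Example
 * Querying roughly how long until a pending task runs.  The result is in
 * whole seconds, and -1 means the id was not found.  Sketch only.
 * \code
 * long secs = ast_sched_when(con, id);
 *
 * if (secs >= 0) {
 *         ast_debug(1, "Task %d runs in about %ld seconds\n", id, secs);
 * }
 * \endcode
 */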