/*
 * Asterisk -- An open source telephony toolkit.
 *
 * Copyright (C) 2007, Digium, Inc.
 *
 * Russell Bryant <russell@digium.com>
 *
 * See http://www.asterisk.org for more information about
 * the Asterisk project. Please do not directly contact
 * any of the maintainers of this project for assistance;
 * the project provides a web site, mailing lists and IRC
 * channels for your use.
 *
 * This program is free software, distributed under the terms of
 * the GNU General Public License Version 2. See the LICENSE file
 * at the top of the source tree.
 */

/*! \file
 *
 * \brief Internal generic event system
 *
 * \author Russell Bryant <russell@digium.com>
 */
#include "asterisk.h"

ASTERISK_FILE_VERSION(__FILE__, "$Revision$")
#include "asterisk/event.h"
#include "asterisk/linkedlists.h"
#include "asterisk/lock.h"
#include "asterisk/utils.h"
/* Only use one thread for now to ensure ordered delivery */
#define NUM_EVENT_THREADS 1
/*! \brief An event information element */
struct ast_event_ie {
	enum ast_event_ie_type ie_type:16;
	/*! Total length of the IE payload */
	uint16_t ie_payload_len;
	unsigned char ie_payload[0];
} __attribute__ ((packed));
/*!
 * \brief An event
 *
 * \note The format of this structure is important and cannot change, since
 * events are sent directly over the network (via IAX2).
 */
struct ast_event {
	/*! Event type */
	enum ast_event_type type:16;
	/*! Total length of the event */
	uint16_t event_len:16;
	/*! The data payload of the event, made up of information elements */
	unsigned char payload[0];
} __attribute__ ((packed));
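/*
 * Illustrative layout (added commentary, not from the original file): on the
 * wire, an event is this 4-byte header followed by a sequence of
 * type-length-value IEs, with all 16-bit fields in network byte order.
 * For example, an event carrying a single 4-byte UINT IE occupies 12 bytes:
 *
 *   bytes 0-1    event type           (htons)
 *   bytes 2-3    event_len = 12       (htons)
 *   bytes 4-5    ie_type              (htons)
 *   bytes 6-7    ie_payload_len = 4   (htons)
 *   bytes 8-11   the uint payload     (htonl)
 */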
struct ast_event_ref {
	struct ast_event *event;
	AST_LIST_ENTRY(ast_event_ref) entry;
};
/*! \brief data shared between event dispatching threads */
static struct {
	ast_cond_t cond;
	ast_mutex_t lock;
	AST_LIST_HEAD_NOLOCK(, ast_event_ref) event_q;
} event_thread = {
	.lock = AST_MUTEX_INIT_VALUE,
};
struct ast_event_ie_val {
	AST_LIST_ENTRY(ast_event_ie_val) entry;
	enum ast_event_ie_type ie_type;
	enum ast_event_ie_pltype ie_pltype;
	union {
		uint32_t uint;
		const char *str;
	} payload;
};
/*! \brief Event subscription */
struct ast_event_sub {
	enum ast_event_type type;
	ast_event_cb_t cb;
	void *userdata;
	uint32_t uniqueid;
	AST_LIST_HEAD_NOLOCK(, ast_event_ie_val) ie_vals;
	AST_RWLIST_ENTRY(ast_event_sub) entry;
};
static uint32_t sub_uniqueid;
/*! \brief Event subscriptions
 * The event subscribers are indexed by which event they are subscribed to */
static AST_RWLIST_HEAD(ast_event_sub_list, ast_event_sub) ast_event_subs[AST_EVENT_TOTAL];
/*! \brief Cached events
 * The event cache is indexed on the event type. It exists for events that
 * express some sort of state, so that when a consumer first needs to know
 * the current state, it can retrieve the last known value from the cache. */
static AST_RWLIST_HEAD(ast_event_ref_list, ast_event_ref) ast_event_cache[AST_EVENT_TOTAL];
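/*
 * A minimal usage sketch (illustrative only; the MWI event and IE names are
 * assumptions borrowed from event_defs.h): a consumer that needs the last
 * known message-waiting state for a mailbox can pull it from the cache
 * instead of waiting for the next event to be generated:
 *
 *   struct ast_event *event;
 *
 *   event = ast_event_get_cached(AST_EVENT_MWI,
 *       AST_EVENT_IE_MAILBOX, AST_EVENT_IE_PLTYPE_STR, "1234",
 *       AST_EVENT_IE_END);
 *   if (event) {
 *       uint32_t new_msgs = ast_event_get_ie_uint(event, AST_EVENT_IE_NEWMSGS);
 *       ast_event_destroy(event);
 *   }
 */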
static void ast_event_ie_val_destroy(struct ast_event_ie_val *ie_val)
{
	if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_STR)
		ast_free((void *) ie_val->payload.str);

	ast_free(ie_val);
}
enum ast_event_subscriber_res ast_event_check_subscriber(enum ast_event_type type, ...)
{
	va_list ap;
	enum ast_event_ie_type ie_type;
	enum ast_event_subscriber_res res = AST_EVENT_SUB_NONE;
	struct ast_event_ie_val *ie_val, *sub_ie_val;
	struct ast_event_sub *sub;
	AST_LIST_HEAD_NOLOCK_STATIC(ie_vals, ast_event_ie_val);

	if (type >= AST_EVENT_TOTAL) {
		ast_log(LOG_ERROR, "%u is an invalid type!\n", type);
		return res;
	}

	/* Build a temporary (stack allocated) list of the given IE parameters */
	va_start(ap, type);
	for (ie_type = va_arg(ap, enum ast_event_ie_type);
		ie_type != AST_EVENT_IE_END;
		ie_type = va_arg(ap, enum ast_event_ie_type))
	{
		struct ast_event_ie_val *ie_val = alloca(sizeof(*ie_val));
		memset(ie_val, 0, sizeof(*ie_val));
		ie_val->ie_type = ie_type;
		ie_val->ie_pltype = va_arg(ap, enum ast_event_ie_pltype);
		if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_UINT)
			ie_val->payload.uint = va_arg(ap, uint32_t);
		else if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_STR)
			ie_val->payload.str = ast_strdupa(va_arg(ap, const char *));
		AST_LIST_INSERT_TAIL(&ie_vals, ie_val, entry);
	}
	va_end(ap);

	/* Look for a subscriber to this event type whose IE filters are all
	 * compatible with the given parameters */
	AST_RWLIST_RDLOCK(&ast_event_subs[type]);
	AST_RWLIST_TRAVERSE(&ast_event_subs[type], sub, entry) {
		AST_LIST_TRAVERSE(&ie_vals, ie_val, entry) {
			AST_LIST_TRAVERSE(&sub->ie_vals, sub_ie_val, entry) {
				if (sub_ie_val->ie_type == ie_val->ie_type)
					break;
			}
			if (!sub_ie_val) {
				/* This subscriber has no filter on this IE */
				if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_EXISTS)
					break;
				continue;
			}
			/* The subscriber doesn't actually care what the value is */
			if (sub_ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_EXISTS)
				continue;
			if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_UINT &&
				ie_val->payload.uint != sub_ie_val->payload.uint)
				break;
			if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_STR &&
				strcmp(ie_val->payload.str, sub_ie_val->payload.str))
				break;
		}
		if (!ie_val)
			break; /* This subscriber matched all of the parameters */
	}
	AST_RWLIST_UNLOCK(&ast_event_subs[type]);

	if (sub) /* All parameters were matched */
		return AST_EVENT_SUB_EXISTS;

	/* Otherwise, check for subscribers to all event types */
	AST_RWLIST_RDLOCK(&ast_event_subs[AST_EVENT_ALL]);
	if (!AST_LIST_EMPTY(&ast_event_subs[AST_EVENT_ALL]))
		res = AST_EVENT_SUB_EXISTS;
	AST_RWLIST_UNLOCK(&ast_event_subs[AST_EVENT_ALL]);

	return res;
}
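/*
 * Illustrative use of ast_event_check_subscriber (a sketch, not part of the
 * original file; the MWI event and IE names are assumptions taken from
 * event_defs.h): a producer can ask whether anyone is listening before
 * spending the effort to build and queue an event at all:
 *
 *   if (ast_event_check_subscriber(AST_EVENT_MWI,
 *           AST_EVENT_IE_MAILBOX, AST_EVENT_IE_PLTYPE_STR, mailbox,
 *           AST_EVENT_IE_END) == AST_EVENT_SUB_NONE) {
 *       return;
 *   }
 *
 * This is the same check ast_event_queue() performs internally, but doing it
 * up front also skips the cost of constructing the event.
 */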
/*! \brief Report the existing subscriptions (as AST_EVENT_SUB events) to
 * this subscriber of subscriber events */
void ast_event_report_subs(const struct ast_event_sub *event_sub)
{
	struct ast_event *event;
	struct ast_event_sub *sub;
	enum ast_event_type event_type = -1;
	struct ast_event_ie_val *ie_val;

	if (event_sub->type != AST_EVENT_SUB)
		return;

	/* Find which event type this subscriber wants reports about */
	AST_LIST_TRAVERSE(&event_sub->ie_vals, ie_val, entry) {
		if (ie_val->ie_type == AST_EVENT_IE_EVENTTYPE) {
			event_type = ie_val->payload.uint;
			break;
		}
	}

	if (event_type == -1)
		return;

	AST_RWLIST_RDLOCK(&ast_event_subs[event_type]);
	AST_RWLIST_TRAVERSE(&ast_event_subs[event_type], sub, entry) {
		if (event_sub == sub)
			continue;

		event = ast_event_new(AST_EVENT_SUB,
			AST_EVENT_IE_UNIQUEID, AST_EVENT_IE_PLTYPE_UINT, sub->uniqueid,
			AST_EVENT_IE_EVENTTYPE, AST_EVENT_IE_PLTYPE_UINT, sub->type,
			AST_EVENT_IE_END);
		if (!event)
			continue;

		/* Describe this subscription's IE filters in the report */
		AST_LIST_TRAVERSE(&sub->ie_vals, ie_val, entry) {
			switch (ie_val->ie_pltype) {
			case AST_EVENT_IE_PLTYPE_EXISTS:
				ast_event_append_ie_uint(&event, AST_EVENT_IE_EXISTS, ie_val->ie_type);
				break;
			case AST_EVENT_IE_PLTYPE_UINT:
				ast_event_append_ie_uint(&event, ie_val->ie_type, ie_val->payload.uint);
				break;
			case AST_EVENT_IE_PLTYPE_STR:
				ast_event_append_ie_str(&event, ie_val->ie_type, ie_val->payload.str);
				break;
			}
			if (!event)
				break;
		}
		if (!event)
			continue;

		event_sub->cb(event, event_sub->userdata);
		ast_event_destroy(event);
	}
	AST_RWLIST_UNLOCK(&ast_event_subs[event_type]);
}
struct ast_event_sub *ast_event_subscribe(enum ast_event_type type, ast_event_cb_t cb,
	void *userdata, ...)
{
	va_list ap;
	enum ast_event_ie_type ie_type;
	struct ast_event_sub *sub;
	struct ast_event *event;

	if (type >= AST_EVENT_TOTAL) {
		ast_log(LOG_ERROR, "%u is an invalid type!\n", type);
		return NULL;
	}

	if (!(sub = ast_calloc(1, sizeof(*sub))))
		return NULL;

	/* Collect the optional IE filters from the argument list */
	va_start(ap, userdata);
	for (ie_type = va_arg(ap, enum ast_event_ie_type);
		ie_type != AST_EVENT_IE_END;
		ie_type = va_arg(ap, enum ast_event_ie_type))
	{
		struct ast_event_ie_val *ie_val;
		if (!(ie_val = ast_calloc(1, sizeof(*ie_val))))
			continue;
		ie_val->ie_type = ie_type;
		ie_val->ie_pltype = va_arg(ap, enum ast_event_ie_pltype);
		if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_UINT)
			ie_val->payload.uint = va_arg(ap, uint32_t);
		else if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_STR) {
			if (!(ie_val->payload.str = ast_strdup(va_arg(ap, const char *)))) {
				ast_free(ie_val);
				continue;
			}
		}
		AST_LIST_INSERT_TAIL(&sub->ie_vals, ie_val, entry);
	}
	va_end(ap);

	sub->type = type;
	sub->cb = cb;
	sub->userdata = userdata;
	sub->uniqueid = ast_atomic_fetchadd_int((int *) &sub_uniqueid, 1);

	/* Announce the new subscription if anyone cares about subscriber events */
	if (ast_event_check_subscriber(AST_EVENT_SUB,
		AST_EVENT_IE_EVENTTYPE, AST_EVENT_IE_PLTYPE_UINT, type,
		AST_EVENT_IE_END) != AST_EVENT_SUB_NONE) {
		struct ast_event_ie_val *ie_val;

		event = ast_event_new(AST_EVENT_SUB,
			AST_EVENT_IE_UNIQUEID, AST_EVENT_IE_PLTYPE_UINT, sub->uniqueid,
			AST_EVENT_IE_EVENTTYPE, AST_EVENT_IE_PLTYPE_UINT, sub->type,
			AST_EVENT_IE_END);

		AST_LIST_TRAVERSE(&sub->ie_vals, ie_val, entry) {
			switch (ie_val->ie_pltype) {
			case AST_EVENT_IE_PLTYPE_EXISTS:
				ast_event_append_ie_uint(&event, AST_EVENT_IE_EXISTS, ie_val->ie_type);
				break;
			case AST_EVENT_IE_PLTYPE_UINT:
				ast_event_append_ie_uint(&event, ie_val->ie_type, ie_val->payload.uint);
				break;
			case AST_EVENT_IE_PLTYPE_STR:
				ast_event_append_ie_str(&event, ie_val->ie_type, ie_val->payload.str);
				break;
			}
			if (!event)
				break;
		}

		if (event)
			ast_event_queue(event);
	}

	AST_RWLIST_WRLOCK(&ast_event_subs[type]);
	AST_RWLIST_INSERT_TAIL(&ast_event_subs[type], sub, entry);
	AST_RWLIST_UNLOCK(&ast_event_subs[type]);

	return sub;
}
static void ast_event_sub_destroy(struct ast_event_sub *sub)
{
	struct ast_event_ie_val *ie_val;

	while ((ie_val = AST_LIST_REMOVE_HEAD(&sub->ie_vals, entry)))
		ast_event_ie_val_destroy(ie_val);

	ast_free(sub);
}
void ast_event_unsubscribe(struct ast_event_sub *sub)
{
	struct ast_event *event;

	AST_RWLIST_WRLOCK(&ast_event_subs[sub->type]);
	AST_LIST_REMOVE(&ast_event_subs[sub->type], sub, entry);
	AST_RWLIST_UNLOCK(&ast_event_subs[sub->type]);

	/* Announce the removal if anyone cares about unsubscribe events */
	if (ast_event_check_subscriber(AST_EVENT_UNSUB,
		AST_EVENT_IE_EVENTTYPE, AST_EVENT_IE_PLTYPE_UINT, sub->type,
		AST_EVENT_IE_END) != AST_EVENT_SUB_NONE) {

		event = ast_event_new(AST_EVENT_UNSUB,
			AST_EVENT_IE_UNIQUEID, AST_EVENT_IE_PLTYPE_UINT, sub->uniqueid,
			AST_EVENT_IE_EVENTTYPE, AST_EVENT_IE_PLTYPE_UINT, sub->type,
			AST_EVENT_IE_END);

		if (event)
			ast_event_queue(event);
	}

	ast_event_sub_destroy(sub);
}
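/*
 * Subscription lifecycle sketch (illustrative, not part of the original
 * file; the callback body and the MWI event/IE names are assumptions):
 * subscribe with optional IE filters, and keep the returned handle for the
 * eventual unsubscribe:
 *
 *   static void mwi_cb(const struct ast_event *event, void *userdata)
 *   {
 *       uint32_t new_msgs = ast_event_get_ie_uint(event, AST_EVENT_IE_NEWMSGS);
 *       ...
 *   }
 *
 *   struct ast_event_sub *sub;
 *
 *   sub = ast_event_subscribe(AST_EVENT_MWI, mwi_cb, userdata,
 *       AST_EVENT_IE_MAILBOX, AST_EVENT_IE_PLTYPE_STR, "1234",
 *       AST_EVENT_IE_END);
 *   ...
 *   ast_event_unsubscribe(sub);
 */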
enum ast_event_type ast_event_get_type(const struct ast_event *event)
{
	return ntohs(event->type);
}
uint32_t ast_event_get_ie_uint(const struct ast_event *event, enum ast_event_ie_type ie_type)
{
	const uint32_t *ie_val;

	ie_val = ast_event_get_ie_raw(event, ie_type);

	return ie_val ? ntohl(*ie_val) : 0;
}
const char *ast_event_get_ie_str(const struct ast_event *event, enum ast_event_ie_type ie_type)
{
	return ast_event_get_ie_raw(event, ie_type);
}
const void *ast_event_get_ie_raw(const struct ast_event *event, enum ast_event_ie_type ie_type)
{
	struct ast_event_ie *ie;
	uint16_t event_len;

	ie_type = ntohs(ie_type);
	event_len = ntohs(event->event_len);

	/* Walk the IEs, which start immediately after the event header */
	ie = ((void *) event) + sizeof(*event);

	while ((((void *) ie) - ((void *) event)) < event_len) {
		if (ie->ie_type == ie_type)
			return ie->ie_payload;
		ie = ((void *) ie) + sizeof(*ie) + ntohs(ie->ie_payload_len);
	}

	return NULL;
}
int ast_event_append_ie_str(struct ast_event **event, enum ast_event_ie_type ie_type,
	const char *str)
{
	return ast_event_append_ie_raw(event, ie_type, str, strlen(str) + 1);
}
int ast_event_append_ie_uint(struct ast_event **event, enum ast_event_ie_type ie_type,
	uint32_t data)
{
	data = htonl(data);
	return ast_event_append_ie_raw(event, ie_type, &data, sizeof(data));
}
int ast_event_append_ie_raw(struct ast_event **event, enum ast_event_ie_type ie_type,
	const void *data, size_t data_len)
{
	struct ast_event_ie *ie;
	unsigned int extra_len;
	uint16_t event_len;

	event_len = ntohs((*event)->event_len);
	extra_len = sizeof(*ie) + data_len;

	if (!(*event = ast_realloc(*event, event_len + extra_len)))
		return -1;

	ie = ((void *) *event) + event_len;
	ie->ie_type = htons(ie_type);
	ie->ie_payload_len = htons(data_len);
	memcpy(ie->ie_payload, data, data_len);

	(*event)->event_len = htons(event_len + extra_len);

	return 0;
}
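/*
 * Building an event incrementally (a sketch; the IE names are assumptions
 * for the example): the append functions take a struct ast_event ** because
 * the event may be reallocated to make room, and on allocation failure
 * *event is set to NULL, so callers should re-check it between appends.
 * Byte-order conversion is handled internally; callers pass host-order
 * values:
 *
 *   struct ast_event *event = ast_event_new(AST_EVENT_MWI, AST_EVENT_IE_END);
 *
 *   if (event) {
 *       ast_event_append_ie_str(&event, AST_EVENT_IE_MAILBOX, "1234");
 *       if (event)
 *           ast_event_append_ie_uint(&event, AST_EVENT_IE_NEWMSGS, 2);
 *   }
 */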
struct ast_event *ast_event_new(enum ast_event_type type, ...)
{
	va_list ap;
	struct ast_event *event;
	enum ast_event_ie_type ie_type;
	struct ast_event_ie_val *ie_val;
	AST_LIST_HEAD_NOLOCK_STATIC(ie_vals, ast_event_ie_val);

	/* Invalid type */
	if (type >= AST_EVENT_TOTAL) {
		ast_log(LOG_WARNING, "Someone tried to create an event of invalid "
			"type '%d'!\n", type);
		return NULL;
	}

	/* Collect the (IE type, payload type, value) triples from the caller */
	va_start(ap, type);
	for (ie_type = va_arg(ap, enum ast_event_ie_type);
		ie_type != AST_EVENT_IE_END;
		ie_type = va_arg(ap, enum ast_event_ie_type))
	{
		struct ast_event_ie_val *ie_val = alloca(sizeof(*ie_val));
		memset(ie_val, 0, sizeof(*ie_val));
		ie_val->ie_type = ie_type;
		ie_val->ie_pltype = va_arg(ap, enum ast_event_ie_pltype);
		if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_UINT)
			ie_val->payload.uint = va_arg(ap, uint32_t);
		else if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_STR)
			ie_val->payload.str = ast_strdupa(va_arg(ap, const char *));
		AST_LIST_INSERT_TAIL(&ie_vals, ie_val, entry);
	}
	va_end(ap);

	if (!(event = ast_calloc(1, sizeof(*event))))
		return NULL;

	event->type = htons(type);
	event->event_len = htons(sizeof(*event));

	AST_LIST_TRAVERSE(&ie_vals, ie_val, entry) {
		if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_STR)
			ast_event_append_ie_str(&event, ie_val->ie_type, ie_val->payload.str);
		else if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_UINT)
			ast_event_append_ie_uint(&event, ie_val->ie_type, ie_val->payload.uint);

		if (!event)
			break;
	}

	return event;
}
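/*
 * Creating and publishing in one call (a sketch; the MWI IE names are
 * assumptions for the example): the variadic arguments are (IE type,
 * payload type, value) triples terminated by AST_EVENT_IE_END, and a queued
 * event becomes owned by the event system:
 *
 *   struct ast_event *event;
 *
 *   event = ast_event_new(AST_EVENT_MWI,
 *       AST_EVENT_IE_MAILBOX, AST_EVENT_IE_PLTYPE_STR, "1234",
 *       AST_EVENT_IE_NEWMSGS, AST_EVENT_IE_PLTYPE_UINT, 2,
 *       AST_EVENT_IE_END);
 *
 *   if (event)
 *       ast_event_queue(event);
 */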
void ast_event_destroy(struct ast_event *event)
{
	ast_free(event);
}
static void ast_event_ref_destroy(struct ast_event_ref *event_ref)
{
	ast_event_destroy(event_ref->event);
	ast_free(event_ref);
}
static struct ast_event *ast_event_dup(const struct ast_event *event)
{
	struct ast_event *dup_event;
	uint16_t event_len;

	event_len = ntohs(event->event_len);

	if (!(dup_event = ast_calloc(1, event_len)))
		return NULL;

	memcpy(dup_event, event, event_len);

	return dup_event;
}
struct ast_event *ast_event_get_cached(enum ast_event_type type, ...)
{
	va_list ap;
	enum ast_event_ie_type ie_type;
	struct ast_event *dup_event = NULL;
	struct ast_event_ref *event_ref;
	struct cache_arg {
		AST_LIST_ENTRY(cache_arg) entry;
		enum ast_event_ie_type ie_type;
		enum ast_event_ie_pltype ie_pltype;
		union {
			uint32_t uint;
			const char *str;
		} payload;
	} *cache_arg;
	AST_LIST_HEAD_NOLOCK_STATIC(cache_args, cache_arg);

	if (type >= AST_EVENT_TOTAL) {
		ast_log(LOG_ERROR, "%u is an invalid type!\n", type);
		return NULL;
	}

	/* Build a temporary (stack allocated) list of the lookup parameters */
	va_start(ap, type);
	for (ie_type = va_arg(ap, enum ast_event_ie_type);
		ie_type != AST_EVENT_IE_END;
		ie_type = va_arg(ap, enum ast_event_ie_type))
	{
		cache_arg = alloca(sizeof(*cache_arg));
		memset(cache_arg, 0, sizeof(*cache_arg));
		cache_arg->ie_type = ie_type;
		cache_arg->ie_pltype = va_arg(ap, enum ast_event_ie_pltype);
		if (cache_arg->ie_pltype == AST_EVENT_IE_PLTYPE_UINT)
			cache_arg->payload.uint = va_arg(ap, uint32_t);
		else if (cache_arg->ie_pltype == AST_EVENT_IE_PLTYPE_STR)
			cache_arg->payload.str = ast_strdupa(va_arg(ap, const char *));
		AST_LIST_INSERT_TAIL(&cache_args, cache_arg, entry);
	}
	va_end(ap);

	if (AST_LIST_EMPTY(&cache_args)) {
		ast_log(LOG_ERROR, "Events can not be retrieved from the cache without "
			"specifying at least one IE type!\n");
		return NULL;
	}

	AST_RWLIST_RDLOCK(&ast_event_cache[type]);
	AST_RWLIST_TRAVERSE_SAFE_BEGIN(&ast_event_cache[type], event_ref, entry) {
		AST_LIST_TRAVERSE(&cache_args, cache_arg, entry) {
			if ( ! ( (cache_arg->ie_pltype == AST_EVENT_IE_PLTYPE_UINT &&
				(cache_arg->payload.uint ==
				 ast_event_get_ie_uint(event_ref->event, cache_arg->ie_type))) ||

				(cache_arg->ie_pltype == AST_EVENT_IE_PLTYPE_STR &&
				(!strcmp(cache_arg->payload.str,
				 ast_event_get_ie_str(event_ref->event, cache_arg->ie_type)))) ||

				(cache_arg->ie_pltype == AST_EVENT_IE_PLTYPE_EXISTS &&
				 ast_event_get_ie_raw(event_ref->event, cache_arg->ie_type)) ) )
			{
				break;
			}
		}
		if (!cache_arg) {
			/* All parameters were matched on this cache entry, so return it */
			dup_event = ast_event_dup(event_ref->event);
			break;
		}
	}
	AST_RWLIST_TRAVERSE_SAFE_END
	AST_RWLIST_UNLOCK(&ast_event_cache[type]);

	return dup_event;
}
/*! \brief Duplicate an event and add it to the cache
 * \note This assumes that the relevant index of the cache is locked */
static int ast_event_dup_and_cache(const struct ast_event *event)
{
	struct ast_event *dup_event;
	struct ast_event_ref *event_ref;

	if (!(dup_event = ast_event_dup(event)))
		return -1;

	if (!(event_ref = ast_calloc(1, sizeof(*event_ref)))) {
		/* Don't leak the duplicate if the ref allocation fails */
		ast_event_destroy(dup_event);
		return -1;
	}

	event_ref->event = dup_event;

	AST_LIST_INSERT_TAIL(&ast_event_cache[ntohs(event->type)], event_ref, entry);

	return 0;
}
int ast_event_queue_and_cache(struct ast_event *event, ...)
{
	va_list ap;
	enum ast_event_ie_type ie_type;
	uint16_t host_event_type;
	struct ast_event_ref *event_ref;
	int res;
	struct cache_arg {
		AST_LIST_ENTRY(cache_arg) entry;
		enum ast_event_ie_type ie_type;
		enum ast_event_ie_pltype ie_pltype;
	} *cache_arg;
	AST_LIST_HEAD_NOLOCK_STATIC(cache_args, cache_arg);

	host_event_type = ntohs(event->type);

	/* Invalid type */
	if (host_event_type >= AST_EVENT_TOTAL) {
		ast_log(LOG_WARNING, "Someone tried to queue an event of invalid "
			"type '%d'!\n", host_event_type);
		return -1;
	}

	/* Collect the IE types that serve as this event's cache key */
	va_start(ap, event);
	for (ie_type = va_arg(ap, enum ast_event_ie_type);
		ie_type != AST_EVENT_IE_END;
		ie_type = va_arg(ap, enum ast_event_ie_type))
	{
		cache_arg = alloca(sizeof(*cache_arg));
		memset(cache_arg, 0, sizeof(*cache_arg));
		cache_arg->ie_type = ie_type;
		cache_arg->ie_pltype = va_arg(ap, enum ast_event_ie_pltype);
		AST_LIST_INSERT_TAIL(&cache_args, cache_arg, entry);
	}
	va_end(ap);

	if (AST_LIST_EMPTY(&cache_args)) {
		ast_log(LOG_ERROR, "Events can not be cached without specifying at "
			"least one IE type!\n");
		return ast_event_queue(event);
	}

	/* Replace any cache entry that represents the same state */
	AST_RWLIST_WRLOCK(&ast_event_cache[host_event_type]);
	AST_RWLIST_TRAVERSE_SAFE_BEGIN(&ast_event_cache[host_event_type], event_ref, entry) {
		AST_LIST_TRAVERSE(&cache_args, cache_arg, entry) {
			if ( ! ( (cache_arg->ie_pltype == AST_EVENT_IE_PLTYPE_UINT &&
				(ast_event_get_ie_uint(event, cache_arg->ie_type) ==
				 ast_event_get_ie_uint(event_ref->event, cache_arg->ie_type))) ||

				(cache_arg->ie_pltype == AST_EVENT_IE_PLTYPE_STR &&
				(!strcmp(ast_event_get_ie_str(event, cache_arg->ie_type),
				 ast_event_get_ie_str(event_ref->event, cache_arg->ie_type)))) ||

				(cache_arg->ie_pltype == AST_EVENT_IE_PLTYPE_EXISTS &&
				 ast_event_get_ie_raw(event_ref->event, cache_arg->ie_type)) ) )
			{
				break;
			}
		}
		if (!cache_arg) {
			/* All parameters were matched on this cache entry, so remove it */
			AST_LIST_REMOVE_CURRENT(&ast_event_cache[host_event_type], entry);
			ast_event_ref_destroy(event_ref);
		}
	}
	AST_RWLIST_TRAVERSE_SAFE_END
	res = ast_event_dup_and_cache(event);
	AST_RWLIST_UNLOCK(&ast_event_cache[host_event_type]);

	return (ast_event_queue(event) || res) ? -1 : 0;
}
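/*
 * Cache-key sketch (illustrative; the MWI IE name is an assumption): the
 * trailing arguments name the IEs that identify "the same state", so a new
 * MWI event for a given mailbox replaces the previously cached event for
 * that mailbox while events for other mailboxes are left in place:
 *
 *   ast_event_queue_and_cache(event,
 *       AST_EVENT_IE_MAILBOX, AST_EVENT_IE_PLTYPE_STR,
 *       AST_EVENT_IE_END);
 *
 * Note that only the IE type and payload type are read here; the values
 * used for matching come from the event itself.
 */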
int ast_event_queue(struct ast_event *event)
{
	struct ast_event_ref *event_ref;
	uint16_t host_event_type;

	host_event_type = ntohs(event->type);

	/* Invalid type */
	if (host_event_type >= AST_EVENT_TOTAL) {
		ast_log(LOG_WARNING, "Someone tried to queue an event of invalid "
			"type '%d'!\n", host_event_type);
		return -1;
	}

	/* If nobody has subscribed to this event type, throw it away now */
	if (ast_event_check_subscriber(host_event_type, AST_EVENT_IE_END)
		== AST_EVENT_SUB_NONE) {
		ast_event_destroy(event);
		return 0;
	}

	if (!(event_ref = ast_calloc(1, sizeof(*event_ref))))
		return -1;

	event_ref->event = event;

	ast_mutex_lock(&event_thread.lock);
	AST_LIST_INSERT_TAIL(&event_thread.event_q, event_ref, entry);
	ast_cond_signal(&event_thread.cond);
	ast_mutex_unlock(&event_thread.lock);

	return 0;
}
static void *ast_event_dispatcher(void *unused)
{
	for (;;) {
		struct ast_event_ref *event_ref;
		struct ast_event_sub *sub;
		uint16_t host_event_type;

		/* Wait for an event to show up on the queue */
		ast_mutex_lock(&event_thread.lock);
		while (!(event_ref = AST_LIST_REMOVE_HEAD(&event_thread.event_q, entry)))
			ast_cond_wait(&event_thread.cond, &event_thread.lock);
		ast_mutex_unlock(&event_thread.lock);

		host_event_type = ntohs(event_ref->event->type);

		/* Subscribers to this specific event first */
		AST_RWLIST_RDLOCK(&ast_event_subs[host_event_type]);
		AST_RWLIST_TRAVERSE(&ast_event_subs[host_event_type], sub, entry) {
			struct ast_event_ie_val *ie_val;
			AST_LIST_TRAVERSE(&sub->ie_vals, ie_val, entry) {
				if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_EXISTS &&
					ast_event_get_ie_raw(event_ref->event, ie_val->ie_type)) {
					continue;
				} else if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_UINT &&
					ast_event_get_ie_uint(event_ref->event, ie_val->ie_type)
					== ie_val->payload.uint) {
					continue;
				} else if (ie_val->ie_pltype == AST_EVENT_IE_PLTYPE_STR &&
					!strcmp(ast_event_get_ie_str(event_ref->event, ie_val->ie_type),
						ie_val->payload.str)) {
					continue;
				}
				break;
			}
			if (ie_val)
				continue; /* An IE filter did not match; skip this subscriber */
			sub->cb(event_ref->event, sub->userdata);
		}
		AST_RWLIST_UNLOCK(&ast_event_subs[host_event_type]);

		/* Now to subscribers to all event types */
		AST_RWLIST_RDLOCK(&ast_event_subs[AST_EVENT_ALL]);
		AST_RWLIST_TRAVERSE(&ast_event_subs[AST_EVENT_ALL], sub, entry)
			sub->cb(event_ref->event, sub->userdata);
		AST_RWLIST_UNLOCK(&ast_event_subs[AST_EVENT_ALL]);

		ast_event_ref_destroy(event_ref);
	}

	return NULL;
}
void ast_event_init(void)
{
	int i;

	for (i = 0; i < AST_EVENT_TOTAL; i++)
		AST_RWLIST_HEAD_INIT(&ast_event_subs[i]);

	for (i = 0; i < AST_EVENT_TOTAL; i++)
		AST_RWLIST_HEAD_INIT(&ast_event_cache[i]);

	ast_cond_init(&event_thread.cond, NULL);

	for (i = 0; i < NUM_EVENT_THREADS; i++) {
		pthread_t dont_care;
		ast_pthread_create_background(&dont_care, NULL, ast_event_dispatcher, NULL);
	}
}