#include "asterisk.h"
-ASTERISK_FILE_VERSION(__FILE__, "$Revision$");
-
#include "asterisk/astobj2.h"
#include "asterisk/stasis_internal.h"
#include "asterisk/stasis.h"
#include "asterisk/taskprocessor.h"
+#include "asterisk/threadpool.h"
#include "asterisk/utils.h"
#include "asterisk/uuid.h"
#include "asterisk/vector.h"
</description>
<see-also>
<ref type="application">UserEvent</ref>
+ <ref type="managerEvent">UserEvent</ref>
</see-also>
</managerEventInstance>
</managerEvent>
<configInfo name="stasis" language="en_US">
<configFile name="stasis.conf">
+ <configObject name="threadpool">
+ <synopsis>Settings that configure the threadpool Stasis uses to deliver messages to subscribers that opt into pooled delivery.</synopsis>
+ <configOption name="initial_size" default="5">
+ <synopsis>Initial number of threads in the message bus threadpool.</synopsis>
+ </configOption>
+ <configOption name="idle_timeout_sec" default="20">
+ <synopsis>Number of seconds before an idle thread is disposed of.</synopsis>
+ </configOption>
+ <configOption name="max_size" default="50">
+ <synopsis>Maximum number of threads in the threadpool.</synopsis>
+ </configOption>
+ </configObject>
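+ <!-- Illustrative stasis.conf fragment for the options above; the values
+ shown are simply the documented defaults:
+
+ [threadpool]
+ initial_size = 5
+ idle_timeout_sec = 20
+ max_size = 50
+ -->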
<configObject name="declined_message_types">
<synopsis>Stasis message types for which to decline creation.</synopsis>
<configOption name="decline">
/*! The number of buckets to use for topic pools */
#define TOPIC_POOL_BUCKETS 57
+/*! Thread pool for subscribers that don't want a dedicated taskprocessor */
+static struct ast_threadpool *pool;
+
STASIS_MESSAGE_TYPE_DEFN(stasis_subscription_change_type);
/*! \internal */
{
/* Notify that the final message has been received */
if (stasis_subscription_final_message(sub, message)) {
- SCOPED_AO2LOCK(lock, sub);
-
+ ao2_lock(sub);
sub->final_message_rxed = 1;
ast_cond_signal(&sub->join_cond);
+ ao2_unlock(sub);
}
/* Since sub is mostly immutable, no need to lock sub */
/* Notify that the final message has been processed */
if (stasis_subscription_final_message(sub, message)) {
- SCOPED_AO2LOCK(lock, sub);
-
+ ao2_lock(sub);
sub->final_message_processed = 1;
ast_cond_signal(&sub->join_cond);
+ ao2_unlock(sub);
}
}
static void send_subscription_subscribe(struct stasis_topic *topic, struct stasis_subscription *sub);
static void send_subscription_unsubscribe(struct stasis_topic *topic, struct stasis_subscription *sub);
+void stasis_subscription_cb_noop(void *data, struct stasis_subscription *sub, struct stasis_message *message)
+{
+}
+
struct stasis_subscription *internal_stasis_subscribe(
struct stasis_topic *topic,
stasis_subscription_cb callback,
void *data,
- int needs_mailbox)
+ int needs_mailbox,
+ int use_thread_pool)
{
- RAII_VAR(struct stasis_subscription *, sub, NULL, ao2_cleanup);
+ struct stasis_subscription *sub;
if (!topic) {
return NULL;
}
/* The ao2 lock is used for join_cond. */
- sub = ao2_t_alloc(sizeof(*sub), subscription_dtor, topic->name);
+ sub = ao2_t_alloc(sizeof(*sub), subscription_dtor, stasis_topic_name(topic));
if (!sub) {
return NULL;
}
-
ast_uuid_generate_str(sub->uniqueid, sizeof(sub->uniqueid));
if (needs_mailbox) {
- /* With a small number of subscribers, a thread-per-sub is
- * acceptable. If our usage changes so that we have larger
- * numbers of subscribers, we'll probably want to consider
- * a threadpool. We had that originally, but with so few
- * subscribers it was actually a performance loss instead of
- * a gain.
+ char tps_name[AST_TASKPROCESSOR_MAX_NAME + 1];
+
+ /* Create name with seq number appended. */
+ ast_taskprocessor_build_name(tps_name, sizeof(tps_name), "sub%c:%s",
+ use_thread_pool ? 'p' : 'm',
+ stasis_topic_name(topic));
+
+ /*
+ * With a small number of subscribers, a thread-per-sub is
+ * acceptable. For a large number of subscribers, a thread
+ * pool should be used.
*/
- sub->mailbox = ast_taskprocessor_get(sub->uniqueid,
- TPS_REF_DEFAULT);
+ if (use_thread_pool) {
+ sub->mailbox = ast_threadpool_serializer(tps_name, pool);
+ } else {
+ sub->mailbox = ast_taskprocessor_get(tps_name, TPS_REF_DEFAULT);
+ }
if (!sub->mailbox) {
+ ao2_ref(sub, -1);
+
return NULL;
}
ast_taskprocessor_set_local(sub->mailbox, sub);
ast_cond_init(&sub->join_cond, NULL);
if (topic_add_subscription(topic, sub) != 0) {
+ ao2_ref(sub, -1);
+
return NULL;
}
send_subscription_subscribe(topic, sub);
- ao2_ref(sub, +1);
return sub;
}
stasis_subscription_cb callback,
void *data)
{
- return internal_stasis_subscribe(topic, callback, data, 1);
+ return internal_stasis_subscribe(topic, callback, data, 1, 0);
+}
+
+struct stasis_subscription *stasis_subscribe_pool(
+ struct stasis_topic *topic,
+ stasis_subscription_cb callback,
+ void *data)
+{
+ return internal_stasis_subscribe(topic, callback, data, 1, 1);
}
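
/*
 * Usage sketch for the two subscribe variants.  "some_topic" and "my_cb" are
 * hypothetical names used only for illustration.  The pooled variant
 * serializes delivery on the shared "stasis-core" threadpool, while plain
 * stasis_subscribe() keeps a dedicated taskprocessor per subscription.
 *
 *	static void my_cb(void *data, struct stasis_subscription *sub,
 *		struct stasis_message *message)
 *	{
 *	}
 *
 *	static struct stasis_subscription *watch(struct stasis_topic *some_topic)
 *	{
 *		return stasis_subscribe_pool(some_topic, my_cb, NULL);
 *	}
 *
 * Teardown is the same for both variants:
 *
 *	sub = stasis_unsubscribe_and_join(sub);
 */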
static int sub_cleanup(void *data)
{
/* The subscription may be the last ref to this topic. Hold
* the topic ref open until after the unlock. */
- RAII_VAR(struct stasis_topic *, topic,
- ao2_bump(sub ? sub->topic : NULL), ao2_cleanup);
+ struct stasis_topic *topic;
if (!sub) {
return NULL;
}
+ topic = ao2_bump(sub->topic);
+
/* We have to remove the subscription first, to ensure the unsubscribe
* is the final message */
if (topic_remove_subscription(sub->topic, sub) != 0) {
ast_log(LOG_ERROR,
"Internal error: subscription has invalid topic\n");
+ ao2_cleanup(topic);
+
return NULL;
}
/* Unsubscribing unrefs the subscription */
ao2_cleanup(sub);
+ ao2_cleanup(topic);
+
return NULL;
}
-void stasis_subscription_join(struct stasis_subscription *subscription)
+int stasis_subscription_set_congestion_limits(struct stasis_subscription *subscription,
+ long low_water, long high_water)
{
+ int res = -1;
+
if (subscription) {
- SCOPED_AO2LOCK(lock, subscription);
+ res = ast_taskprocessor_alert_set_levels(subscription->mailbox,
+ low_water, high_water);
+ }
+ return res;
+}
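+
+/*
+ * Usage sketch (the water marks are made-up values): a subscriber that knows
+ * it can fall behind may set congestion limits on its mailbox, so the
+ * taskprocessor raises an alert when its queue depth crosses the high-water
+ * mark and clears it again at the low-water mark.
+ *
+ *	sub = stasis_subscribe_pool(some_topic, my_cb, NULL);
+ *	if (sub) {
+ *		stasis_subscription_set_congestion_limits(sub, 100, 500);
+ *	}
+ */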
+
+void stasis_subscription_join(struct stasis_subscription *subscription)
+{
+ if (subscription) {
+ ao2_lock(subscription);
/* Wait until the processed flag has been set */
while (!subscription->final_message_processed) {
ast_cond_wait(&subscription->join_cond,
ao2_object_get_lockaddr(subscription));
}
+ ao2_unlock(subscription);
}
}
int stasis_subscription_is_done(struct stasis_subscription *subscription)
{
if (subscription) {
- SCOPED_AO2LOCK(lock, subscription);
+ int ret;
- return subscription->final_message_rxed;
+ ao2_lock(subscription);
+ ret = subscription->final_message_rxed;
+ ao2_unlock(subscription);
+
+ return ret;
}
/* Null subscription is about as done as you can get */
if (sub) {
size_t i;
struct stasis_topic *topic = sub->topic;
- SCOPED_AO2LOCK(lock_topic, topic);
+ ao2_lock(topic);
for (i = 0; i < AST_VECTOR_SIZE(&topic->subscribers); ++i) {
if (AST_VECTOR_GET(&topic->subscribers, i) == sub) {
+ ao2_unlock(topic);
return 1;
}
}
+ ao2_unlock(topic);
}
return 0;
static int topic_add_subscription(struct stasis_topic *topic, struct stasis_subscription *sub)
{
size_t idx;
- SCOPED_AO2LOCK(lock, topic);
+ ao2_lock(topic);
/* The reference from the topic to the subscription is shared with
* the owner of the subscription, which will explicitly unsubscribe
* to release it.
topic_add_subscription(
AST_VECTOR_GET(&topic->upstream_topics, idx), sub);
}
+ ao2_unlock(topic);
return 0;
}
static int topic_remove_subscription(struct stasis_topic *topic, struct stasis_subscription *sub)
{
size_t idx;
- SCOPED_AO2LOCK(lock_topic, topic);
+ int res;
+ ao2_lock(topic);
for (idx = 0; idx < AST_VECTOR_SIZE(&topic->upstream_topics); ++idx) {
topic_remove_subscription(
AST_VECTOR_GET(&topic->upstream_topics, idx), sub);
}
-
- return AST_VECTOR_REMOVE_ELEM_UNORDERED(&topic->subscribers, sub,
+ res = AST_VECTOR_REMOVE_ELEM_UNORDERED(&topic->subscribers, sub,
AST_VECTOR_ELEM_CLEANUP_NOOP);
+ ao2_unlock(topic);
+
+ return res;
}
/*!
{
int res;
size_t idx;
- RAII_VAR(struct stasis_forward *, forward, NULL, ao2_cleanup);
+ struct stasis_forward *forward;
if (!from_topic || !to_topic) {
return NULL;
/* Forwards to ourselves are implicit. */
if (to_topic == from_topic) {
- return ao2_bump(forward);
+ return forward;
}
forward->from_topic = ao2_bump(from_topic);
if (res != 0) {
ao2_unlock(from_topic);
ao2_unlock(to_topic);
+ ao2_ref(forward, -1);
return NULL;
}
ao2_unlock(from_topic);
ao2_unlock(to_topic);
- return ao2_bump(forward);
+ return forward;
}
static void subscription_change_dtor(void *obj)
void stasis_log_bad_type_access(const char *name)
{
+#ifdef AST_DEVMODE
ast_log(LOG_ERROR, "Use of %s() before init/after destruction\n", name);
+#endif
}
/*! \brief A multi object blob data structure to carry user event stasis messages */
struct ast_multi_object_blob *ast_multi_object_blob_create(struct ast_json *blob)
{
int type;
- RAII_VAR(struct ast_multi_object_blob *, multi,
- ao2_alloc(sizeof(*multi), multi_object_blob_dtor),
- ao2_cleanup);
+ struct ast_multi_object_blob *multi;
ast_assert(blob != NULL);
+ multi = ao2_alloc(sizeof(*multi), multi_object_blob_dtor);
if (!multi) {
return NULL;
}
for (type = 0; type < STASIS_UMOS_MAX; ++type) {
if (AST_VECTOR_INIT(&multi->snapshots[type], 0)) {
+ ao2_ref(multi, -1);
+
return NULL;
}
}
multi->blob = ast_json_ref(blob);
- ao2_ref(multi, +1);
return multi;
}
void ast_multi_object_blob_add(struct ast_multi_object_blob *multi,
enum stasis_user_multi_object_snapshot_type type, void *object)
{
- if (!multi || !object) {
- return;
+ if (!multi || !object || AST_VECTOR_APPEND(&multi->snapshots[type], object)) {
+ ao2_cleanup(object);
}
- AST_VECTOR_APPEND(&multi->snapshots[type],object);
}
/*! \brief Publish single channel user event (for app_userevent compatibility) */
void ast_multi_object_blob_single_channel_publish(struct ast_channel *chan,
struct stasis_message_type *type, struct ast_json *blob)
{
- RAII_VAR(struct stasis_message *, message, NULL, ao2_cleanup);
- RAII_VAR(struct ast_channel_snapshot *, channel_snapshot, NULL, ao2_cleanup);
- RAII_VAR(struct ast_multi_object_blob *, multi, NULL, ao2_cleanup);
+ struct stasis_message *message;
+ struct ast_channel_snapshot *channel_snapshot;
+ struct ast_multi_object_blob *multi;
if (!type) {
return;
}
channel_snapshot = ast_channel_snapshot_create(chan);
- ao2_ref(channel_snapshot, +1);
+ if (!channel_snapshot) {
+ ao2_ref(multi, -1);
+ return;
+ }
+
+ /* this call steals the channel_snapshot reference */
ast_multi_object_blob_add(multi, STASIS_UMOS_CHANNEL, channel_snapshot);
message = stasis_message_create(type, multi);
+ ao2_ref(multi, -1);
if (message) {
/* app_userevent still publishes to channel */
stasis_publish(ast_channel_topic(chan), message);
+ ao2_ref(message, -1);
}
}
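
/*
 * Call-sequence sketch (the event name "MyEvent" is made up): roughly how a
 * caller such as app_userevent drives the helper above.  The create path
 * takes its own reference on the blob, so the caller still unrefs its copy.
 *
 *	struct ast_json *blob;
 *
 *	blob = ast_json_pack("{s: s}", "eventname", "MyEvent");
 *	if (blob) {
 *		ast_multi_object_blob_single_channel_publish(chan,
 *			ast_multi_user_event_type(), blob);
 *		ast_json_unref(blob);
 *	}
 */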
struct stasis_message *message,
const struct stasis_message_sanitizer *sanitize)
{
- RAII_VAR(struct ast_json *, out, NULL, ast_json_unref);
+ struct ast_json *out;
struct ast_multi_object_blob *multi = stasis_message_data(message);
struct ast_json *blob = multi->blob;
const struct timeval *tv = stasis_message_timestamp(message);
ast_json_object_set(out, "type", ast_json_string_create("ChannelUserevent"));
ast_json_object_set(out, "timestamp", ast_json_timeval(*tv, NULL));
- ast_json_object_set(out, "eventname", ast_json_ref(ast_json_object_get(blob, "eventname")));
- ast_json_object_set(out, "userevent", ast_json_ref(blob)); /* eventname gets duplicated, that's ok */
+ ast_json_object_set(out, "eventname", ast_json_string_create(ast_json_string_get((ast_json_object_get(blob, "eventname")))));
+ ast_json_object_set(out, "userevent", ast_json_deep_copy(blob));
for (type = 0; type < STASIS_UMOS_MAX; ++type) {
for (i = 0; i < AST_VECTOR_SIZE(&multi->snapshots[type]); ++i) {
}
}
}
- return ast_json_ref(out);
+
+ return out;
}
/*! \internal \brief convert multi object blob to ami string */
for (type = 0; type < STASIS_UMOS_MAX; ++type) {
for (i = 0; i < AST_VECTOR_SIZE(&multi->snapshots[type]); ++i) {
- char *name = "";
+ char *name = NULL;
void *snapshot = AST_VECTOR_GET(&multi->snapshots[type], i);
ami_snapshot = NULL;
switch (type) {
case STASIS_UMOS_CHANNEL:
- ami_snapshot = ast_manager_build_channel_state_string_prefix(snapshot, name);
+ ami_snapshot = ast_manager_build_channel_state_string_prefix(snapshot, name ?: "");
break;
case STASIS_UMOS_BRIDGE:
- ami_snapshot = ast_manager_build_bridge_state_string_prefix(snapshot, name);
+ ami_snapshot = ast_manager_build_bridge_state_string_prefix(snapshot, name ?: "");
break;
case STASIS_UMOS_ENDPOINT:
ast_str_append(&ami_str, 0, "%s", ast_str_buffer(ami_snapshot));
ast_free(ami_snapshot);
}
+ ast_free(name);
}
}
struct ao2_container *declined;
};
+/*! \brief Threadpool configuration options */
+struct stasis_threadpool_conf {
+ /*! Initial size of the thread pool */
+ int initial_size;
+ /*! Time, in seconds, before we expire a thread */
+ int idle_timeout_sec;
+ /*! Maximum number of threads to allow */
+ int max_size;
+};
struct stasis_config {
+ /*! Thread pool configuration options */
+ struct stasis_threadpool_conf *threadpool_options;
+ /*! Declined message types */
struct stasis_declined_config *declined_message_types;
};
+static struct aco_type threadpool_option = {
+ .type = ACO_GLOBAL,
+ .name = "threadpool",
+ .item_offset = offsetof(struct stasis_config, threadpool_options),
+ .category = "threadpool",
+ .category_match = ACO_WHITELIST_EXACT,
+};
+
+static struct aco_type *threadpool_options[] = ACO_TYPES(&threadpool_option);
+
/*! \brief An aco_type structure to link the "declined_message_types" category to the stasis_declined_config type */
static struct aco_type declined_option = {
.type = ACO_GLOBAL,
.name = "declined_message_types",
.item_offset = offsetof(struct stasis_config, declined_message_types),
- .category_match = ACO_WHITELIST,
- .category = "^declined_message_types$",
+ .category_match = ACO_WHITELIST_EXACT,
+ .category = "declined_message_types",
};
struct aco_type *declined_options[] = ACO_TYPES(&declined_option);
struct aco_file stasis_conf = {
.filename = "stasis.conf",
- .types = ACO_TYPES(&declined_option),
+ .types = ACO_TYPES(&declined_option, &threadpool_option),
};
/*! \brief A global object container that will contain the stasis_config that gets swapped out on reloads */
static void stasis_declined_config_destructor(void *obj)
{
struct stasis_declined_config *declined = obj;
+
ao2_cleanup(declined->declined);
}
static void stasis_config_destructor(void *obj)
{
struct stasis_config *cfg = obj;
+
ao2_cleanup(cfg->declined_message_types);
+ ast_free(cfg->threadpool_options);
}
static void *stasis_config_alloc(void)
return NULL;
}
- /* Allocate/initialize memory */
- cfg->declined_message_types = ao2_alloc(sizeof(*cfg->declined_message_types), stasis_declined_config_destructor);
+ cfg->threadpool_options = ast_calloc(1, sizeof(*cfg->threadpool_options));
+ if (!cfg->threadpool_options) {
+ ao2_ref(cfg, -1);
+ return NULL;
+ }
+
+ cfg->declined_message_types = ao2_alloc(sizeof(*cfg->declined_message_types),
+ stasis_declined_config_destructor);
if (!cfg->declined_message_types) {
- goto error;
+ ao2_ref(cfg, -1);
+ return NULL;
}
cfg->declined_message_types->declined = ast_str_container_alloc(13);
if (!cfg->declined_message_types->declined) {
- goto error;
+ ao2_ref(cfg, -1);
+ return NULL;
}
return cfg;
-error:
- ao2_ref(cfg, -1);
- return NULL;
}
int stasis_message_type_declined(const char *name)
{
- RAII_VAR(struct stasis_config *, cfg, ao2_global_obj_ref(globals), ao2_cleanup);
+ struct stasis_config *cfg = ao2_global_obj_ref(globals);
char *name_in_declined;
int res;
if (!cfg || !cfg->declined_message_types) {
+ ao2_cleanup(cfg);
return 0;
}
name_in_declined = ao2_find(cfg->declined_message_types->declined, name, OBJ_SEARCH_KEY);
res = name_in_declined ? 1 : 0;
ao2_cleanup(name_in_declined);
+ ao2_ref(cfg, -1);
if (res) {
ast_log(LOG_NOTICE, "Declining to allocate Stasis message type '%s' due to configuration\n", name);
}
/*! \brief Cleanup function for graceful shutdowns */
static void stasis_cleanup(void)
{
+ ast_threadpool_shutdown(pool);
+ pool = NULL;
STASIS_MESSAGE_TYPE_CLEANUP(stasis_subscription_change_type);
STASIS_MESSAGE_TYPE_CLEANUP(ast_multi_user_event_type);
aco_info_destroy(&cfg_info);
int stasis_init(void)
{
+ struct stasis_config *cfg;
int cache_init;
+ struct ast_threadpool_options threadpool_opts = { 0, };
/* Be sure the types are cleaned up after the message bus */
ast_register_cleanup(stasis_cleanup);
return -1;
}
- aco_option_register_custom(&cfg_info, "decline", ACO_EXACT, declined_options, "", declined_handler, 0);
+ aco_option_register_custom(&cfg_info, "decline", ACO_EXACT,
+ declined_options, "", declined_handler, 0);
+ aco_option_register(&cfg_info, "initial_size", ACO_EXACT,
+ threadpool_options, "5", OPT_INT_T, PARSE_IN_RANGE,
+ FLDSET(struct stasis_threadpool_conf, initial_size), 0,
+ INT_MAX);
+ aco_option_register(&cfg_info, "idle_timeout_sec", ACO_EXACT,
+ threadpool_options, "20", OPT_INT_T, PARSE_IN_RANGE,
+ FLDSET(struct stasis_threadpool_conf, idle_timeout_sec), 0,
+ INT_MAX);
+ aco_option_register(&cfg_info, "max_size", ACO_EXACT,
+ threadpool_options, "50", OPT_INT_T, PARSE_IN_RANGE,
+ FLDSET(struct stasis_threadpool_conf, max_size), 0,
+ INT_MAX);
if (aco_process_config(&cfg_info, 0) == ACO_PROCESS_ERROR) {
- RAII_VAR(struct stasis_config *, stasis_cfg, stasis_config_alloc(), ao2_cleanup);
+ struct stasis_config *default_cfg = stasis_config_alloc();
+
+ if (!default_cfg) {
+ return -1;
+ }
- if (aco_set_defaults(&declined_option, "declined_message_types", stasis_cfg->declined_message_types)) {
+ if (aco_set_defaults(&threadpool_option, "threadpool", default_cfg->threadpool_options)) {
+ ast_log(LOG_ERROR, "Failed to initialize defaults on Stasis configuration object\n");
+ ao2_ref(default_cfg, -1);
+
+ return -1;
+ }
+
+ if (aco_set_defaults(&declined_option, "declined_message_types", default_cfg->declined_message_types)) {
ast_log(LOG_ERROR, "Failed to load stasis.conf and failed to initialize defaults.\n");
+ ao2_ref(default_cfg, -1);
+
return -1;
}
- ast_log(LOG_NOTICE, "Could not load stasis config; using defaults\n");
- ao2_global_obj_replace_unref(globals, stasis_cfg);
+ ast_log(LOG_NOTICE, "Could not load Stasis configuration; using defaults\n");
+ ao2_global_obj_replace_unref(globals, default_cfg);
+ cfg = default_cfg;
+ } else {
+ cfg = ao2_global_obj_ref(globals);
+ if (!cfg) {
+ ast_log(LOG_ERROR, "Failed to obtain Stasis configuration object\n");
+
+ return -1;
+ }
+ }
+
+ threadpool_opts.version = AST_THREADPOOL_OPTIONS_VERSION;
+ threadpool_opts.initial_size = cfg->threadpool_options->initial_size;
+ threadpool_opts.auto_increment = 1;
+ threadpool_opts.max_size = cfg->threadpool_options->max_size;
+ threadpool_opts.idle_timeout = cfg->threadpool_options->idle_timeout_sec;
+ pool = ast_threadpool_create("stasis-core", NULL, &threadpool_opts);
+ ao2_ref(cfg, -1);
+ if (!pool) {
+ ast_log(LOG_ERROR, "Failed to create 'stasis-core' threadpool\n");
+
+ return -1;
}
cache_init = stasis_cache_init();
return 0;
}
-