* \author Joshua Colp <jcolp@digium.com>
*/
-#include "asterisk.h"
+/*** MODULEINFO
+ <support_level>core</support_level>
+ ***/
-ASTERISK_FILE_VERSION(__FILE__, "$Revision$")
+#include "asterisk.h"
#include <signal.h>
#include "asterisk/slinfactory.h"
#include "asterisk/frame.h"
#include "asterisk/translate.h"
+#include "asterisk/format_cache.h"
+
+#define AST_AUDIOHOOK_SYNC_TOLERANCE 100 /*!< Tolerance in milliseconds for audiohooks synchronization */
+#define AST_AUDIOHOOK_SMALL_QUEUE_TOLERANCE 100 /*!< When small queue is enabled, this is the maximum amount of audio that can remain queued at a time. */
+#define AST_AUDIOHOOK_LONG_QUEUE_TOLERANCE 500 /*!< Otherwise we still don't want the queue to grow indefinitely */
+
+#define DEFAULT_INTERNAL_SAMPLE_RATE 8000
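+
+/*
+ * Worked example (for illustration only): the slin factories store audio at
+ * the hook's internal sample rate, so each tolerance above corresponds to
+ * rate / 1000 samples per millisecond.  At the default 8kHz rate the 100ms
+ * small-queue tolerance is 800 queued samples and the 500ms long-queue
+ * tolerance is 4000 samples; at 16kHz the same limits correspond to 1600
+ * and 8000 samples respectively.
+ */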
struct ast_audiohook_translate {
struct ast_trans_pvt *trans_pvt;
- format_t format;
+ struct ast_format *format;
};
struct ast_audiohook_list {
+ /* If all the audiohooks in this list are capable
+ * of processing slinear at any sample rate, this
+ * variable will be set and the sample rate will
+	 * be preserved during ast_audiohook_write_list() */
+ int native_slin_compatible;
+ int list_internal_samp_rate;/*!< Internal sample rate used when writing to the audiohook list */
+
struct ast_audiohook_translate in_translate[2];
struct ast_audiohook_translate out_translate[2];
AST_LIST_HEAD_NOLOCK(, ast_audiohook) spy_list;
AST_LIST_HEAD_NOLOCK(, ast_audiohook) manipulate_list;
};
+static int audiohook_set_internal_rate(struct ast_audiohook *audiohook, int rate, int reset)
+{
+ struct ast_format *slin;
+
+ if (audiohook->hook_internal_samp_rate == rate) {
+ return 0;
+ }
+
+ audiohook->hook_internal_samp_rate = rate;
+
+ slin = ast_format_cache_get_slin_by_rate(rate);
+
+ /* Setup the factories that are needed for this audiohook type */
+ switch (audiohook->type) {
+ case AST_AUDIOHOOK_TYPE_SPY:
+ case AST_AUDIOHOOK_TYPE_WHISPER:
+ if (reset) {
+ ast_slinfactory_destroy(&audiohook->read_factory);
+ ast_slinfactory_destroy(&audiohook->write_factory);
+ }
+ ast_slinfactory_init_with_format(&audiohook->read_factory, slin);
+ ast_slinfactory_init_with_format(&audiohook->write_factory, slin);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
/*! \brief Initialize an audiohook structure
+ *
* \param audiohook Audiohook structure
* \param type
- * \param source
+ * \param source
+ * \param init_flags
+ *
* \return Returns 0 on success, -1 on failure
*/
-int ast_audiohook_init(struct ast_audiohook *audiohook, enum ast_audiohook_type type, const char *source)
+int ast_audiohook_init(struct ast_audiohook *audiohook, enum ast_audiohook_type type, const char *source, enum ast_audiohook_init_flags init_flags)
{
/* Need to keep the type and source */
audiohook->type = type;
ast_mutex_init(&audiohook->lock);
ast_cond_init(&audiohook->trigger, NULL);
- /* Setup the factories that are needed for this audiohook type */
- switch (type) {
- case AST_AUDIOHOOK_TYPE_SPY:
- ast_slinfactory_init(&audiohook->read_factory);
- case AST_AUDIOHOOK_TYPE_WHISPER:
- ast_slinfactory_init(&audiohook->write_factory);
- break;
- default:
- break;
- }
+ audiohook->init_flags = init_flags;
+
+	/* Initialize internal rate at 8kHz; this will adjust if necessary */
+ audiohook_set_internal_rate(audiohook, DEFAULT_INTERNAL_SAMPLE_RATE, 0);
/* Since we are just starting out... this audiohook is new */
ast_audiohook_update_status(audiohook, AST_AUDIOHOOK_STATUS_NEW);
/* Drop the factories used by this audiohook type */
switch (audiohook->type) {
case AST_AUDIOHOOK_TYPE_SPY:
- ast_slinfactory_destroy(&audiohook->read_factory);
case AST_AUDIOHOOK_TYPE_WHISPER:
+ ast_slinfactory_destroy(&audiohook->read_factory);
ast_slinfactory_destroy(&audiohook->write_factory);
break;
default:
if (audiohook->trans_pvt)
ast_translator_free_path(audiohook->trans_pvt);
+ ao2_cleanup(audiohook->format);
+
/* Lock and trigger be gone! */
ast_cond_destroy(&audiohook->trigger);
ast_mutex_destroy(&audiohook->lock);
return 0;
}
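+
+/*
+ * Minimal lifecycle sketch (illustrative only; "my_module" and chan are
+ * hypothetical).  A spy is initialized with the new init_flags argument,
+ * attached to a channel, and torn down again; frames would normally be
+ * pulled with ast_audiohook_read_frame() in between.
+ *
+ * \code
+ * struct ast_audiohook spy;
+ *
+ * if (ast_audiohook_init(&spy, AST_AUDIOHOOK_TYPE_SPY, "my_module", 0)) {
+ *	return -1;
+ * }
+ * if (ast_audiohook_attach(chan, &spy)) {
+ *	ast_audiohook_destroy(&spy);
+ *	return -1;
+ * }
+ *
+ * ast_audiohook_detach(&spy);
+ * ast_audiohook_destroy(&spy);
+ * \endcode
+ */
+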
+#define SHOULD_MUTE(hook, dir) \
+ ((ast_test_flag(hook, AST_AUDIOHOOK_MUTE_READ) && (dir == AST_AUDIOHOOK_DIRECTION_READ)) || \
+ (ast_test_flag(hook, AST_AUDIOHOOK_MUTE_WRITE) && (dir == AST_AUDIOHOOK_DIRECTION_WRITE)) || \
+ (ast_test_flag(hook, AST_AUDIOHOOK_MUTE_READ | AST_AUDIOHOOK_MUTE_WRITE) == (AST_AUDIOHOOK_MUTE_READ | AST_AUDIOHOOK_MUTE_WRITE)))
+
/*! \brief Writes a frame into the audiohook structure
* \param audiohook Audiohook structure
* \param direction Direction the audio frame came from
*rwtime = ast_tvnow();
our_factory_samples = ast_slinfactory_available(factory);
- our_factory_ms = ast_tvdiff_ms(*rwtime, previous_time) + (our_factory_samples / 8);
+ our_factory_ms = ast_tvdiff_ms(*rwtime, previous_time) + (our_factory_samples / (audiohook->hook_internal_samp_rate / 1000));
other_factory_samples = ast_slinfactory_available(other_factory);
- other_factory_ms = other_factory_samples / 8;
+ other_factory_ms = other_factory_samples / (audiohook->hook_internal_samp_rate / 1000);
- if (ast_test_flag(audiohook, AST_AUDIOHOOK_TRIGGER_SYNC) && other_factory_samples && (our_factory_ms - other_factory_ms > AST_AUDIOHOOK_SYNC_TOLERANCE)) {
- if (option_debug)
- ast_log(LOG_DEBUG, "Flushing audiohook %p so it remains in sync\n", audiohook);
+ if (ast_test_flag(audiohook, AST_AUDIOHOOK_TRIGGER_SYNC) && (our_factory_ms - other_factory_ms > AST_AUDIOHOOK_SYNC_TOLERANCE)) {
+ ast_debug(1, "Flushing audiohook %p so it remains in sync\n", audiohook);
ast_slinfactory_flush(factory);
ast_slinfactory_flush(other_factory);
}
- if (ast_test_flag(audiohook, AST_AUDIOHOOK_SMALL_QUEUE) && (our_factory_samples > 640 || other_factory_samples > 640)) {
- if (option_debug) {
- ast_log(LOG_DEBUG, "Audiohook %p has stale audio in its factories. Flushing them both\n", audiohook);
- }
+ if (ast_test_flag(audiohook, AST_AUDIOHOOK_SMALL_QUEUE) && ((our_factory_ms > AST_AUDIOHOOK_SMALL_QUEUE_TOLERANCE) || (other_factory_ms > AST_AUDIOHOOK_SMALL_QUEUE_TOLERANCE))) {
+ ast_debug(1, "Audiohook %p has stale audio in its factories. Flushing them both\n", audiohook);
+ ast_slinfactory_flush(factory);
+ ast_slinfactory_flush(other_factory);
+ } else if ((our_factory_ms > AST_AUDIOHOOK_LONG_QUEUE_TOLERANCE) || (other_factory_ms > AST_AUDIOHOOK_LONG_QUEUE_TOLERANCE)) {
+ ast_debug(1, "Audiohook %p has stale audio in its factories. Flushing them both\n", audiohook);
ast_slinfactory_flush(factory);
ast_slinfactory_flush(other_factory);
}
short buf[samples];
struct ast_frame frame = {
.frametype = AST_FRAME_VOICE,
- .subclass.codec = AST_FORMAT_SLINEAR,
+ .subclass.format = ast_format_cache_get_slin_by_rate(audiohook->hook_internal_samp_rate),
.data.ptr = buf,
.datalen = sizeof(buf),
.samples = samples,
};
/* Ensure the factory is able to give us the samples we want */
- if (samples > ast_slinfactory_available(factory))
+ if (samples > ast_slinfactory_available(factory)) {
return NULL;
-
+ }
+
/* Read data in from factory */
- if (!ast_slinfactory_read(factory, buf, samples))
+ if (!ast_slinfactory_read(factory, buf, samples)) {
return NULL;
+ }
- /* If a volume adjustment needs to be applied apply it */
- if (vol)
+ if (SHOULD_MUTE(audiohook, direction)) {
+ /* Swap frame data for zeros if mute is required */
+ ast_frame_clear(&frame);
+ } else if (vol) {
+ /* If a volume adjustment needs to be applied apply it */
ast_frame_adjust_volume(&frame, vol);
+ }
return ast_frdup(&frame);
}
-static struct ast_frame *audiohook_read_frame_both(struct ast_audiohook *audiohook, size_t samples)
+static struct ast_frame *audiohook_read_frame_both(struct ast_audiohook *audiohook, size_t samples, struct ast_frame **read_reference, struct ast_frame **write_reference)
{
- int i = 0, usable_read, usable_write;
- short buf1[samples], buf2[samples], *read_buf = NULL, *write_buf = NULL, *final_buf = NULL, *data1 = NULL, *data2 = NULL;
+ int count;
+ int usable_read;
+ int usable_write;
+ short adjust_value;
+ short buf1[samples];
+ short buf2[samples];
+ short *read_buf = NULL;
+ short *write_buf = NULL;
struct ast_frame frame = {
.frametype = AST_FRAME_VOICE,
- .subclass.codec = AST_FORMAT_SLINEAR,
- .data.ptr = NULL,
.datalen = sizeof(buf1),
.samples = samples,
};
if (!usable_read && !usable_write) {
/* If both factories are unusable bail out */
- ast_debug(1, "Read factory %p and write factory %p both fail to provide %zd samples\n", &audiohook->read_factory, &audiohook->write_factory, samples);
+ ast_debug(1, "Read factory %p and write factory %p both fail to provide %zu samples\n", &audiohook->read_factory, &audiohook->write_factory, samples);
return NULL;
}
if (usable_read) {
if (ast_slinfactory_read(&audiohook->read_factory, buf1, samples)) {
read_buf = buf1;
- /* Adjust read volume if need be */
- if (audiohook->options.read_volume) {
- int count = 0;
- short adjust_value = abs(audiohook->options.read_volume);
+
+ if ((ast_test_flag(audiohook, AST_AUDIOHOOK_MUTE_READ))) {
+ /* Clear the frame data if we are muting */
+ memset(buf1, 0, sizeof(buf1));
+ } else if (audiohook->options.read_volume) {
+ /* Adjust read volume if need be */
+ adjust_value = abs(audiohook->options.read_volume);
for (count = 0; count < samples; count++) {
- if (audiohook->options.read_volume > 0)
+ if (audiohook->options.read_volume > 0) {
ast_slinear_saturated_multiply(&buf1[count], &adjust_value);
- else if (audiohook->options.read_volume < 0)
+ } else if (audiohook->options.read_volume < 0) {
ast_slinear_saturated_divide(&buf1[count], &adjust_value);
+ }
}
}
}
- } else if (option_debug)
- ast_log(LOG_DEBUG, "Failed to get %d samples from read factory %p\n", (int)samples, &audiohook->read_factory);
+ } else {
+ ast_debug(1, "Failed to get %d samples from read factory %p\n", (int)samples, &audiohook->read_factory);
+ }
/* Move on to the write factory... if there are enough samples, read them in */
if (usable_write) {
if (ast_slinfactory_read(&audiohook->write_factory, buf2, samples)) {
write_buf = buf2;
- /* Adjust write volume if need be */
- if (audiohook->options.write_volume) {
- int count = 0;
- short adjust_value = abs(audiohook->options.write_volume);
+
+ if ((ast_test_flag(audiohook, AST_AUDIOHOOK_MUTE_WRITE))) {
+ /* Clear the frame data if we are muting */
+ memset(buf2, 0, sizeof(buf2));
+ } else if (audiohook->options.write_volume) {
+ /* Adjust write volume if need be */
+ adjust_value = abs(audiohook->options.write_volume);
for (count = 0; count < samples; count++) {
- if (audiohook->options.write_volume > 0)
+ if (audiohook->options.write_volume > 0) {
ast_slinear_saturated_multiply(&buf2[count], &adjust_value);
- else if (audiohook->options.write_volume < 0)
+ } else if (audiohook->options.write_volume < 0) {
ast_slinear_saturated_divide(&buf2[count], &adjust_value);
+ }
}
}
}
- } else if (option_debug)
- ast_log(LOG_DEBUG, "Failed to get %d samples from write factory %p\n", (int)samples, &audiohook->write_factory);
+ } else {
+ ast_debug(1, "Failed to get %d samples from write factory %p\n", (int)samples, &audiohook->write_factory);
+ }
+
+ frame.subclass.format = ast_format_cache_get_slin_by_rate(audiohook->hook_internal_samp_rate);
/* Basically we figure out which buffer to use... and if mixing can be done here */
- if (!read_buf && !write_buf)
- return NULL;
- else if (read_buf && write_buf) {
- for (i = 0, data1 = read_buf, data2 = write_buf; i < samples; i++, data1++, data2++)
- ast_slinear_saturated_add(data1, data2);
- final_buf = buf1;
- } else if (read_buf)
- final_buf = buf1;
- else if (write_buf)
- final_buf = buf2;
+ if (read_buf && read_reference) {
+ frame.data.ptr = read_buf;
+ *read_reference = ast_frdup(&frame);
+ }
+ if (write_buf && write_reference) {
+ frame.data.ptr = write_buf;
+ *write_reference = ast_frdup(&frame);
+ }
- /* Make the final buffer part of the frame, so it gets duplicated fine */
- frame.data.ptr = final_buf;
+ /* Make the correct buffer part of the built frame, so it gets duplicated. */
+ if (read_buf) {
+ frame.data.ptr = read_buf;
+ if (write_buf) {
+ for (count = 0; count < samples; count++) {
+ ast_slinear_saturated_add(read_buf++, write_buf++);
+ }
+ }
+ } else if (write_buf) {
+ frame.data.ptr = write_buf;
+ } else {
+ return NULL;
+ }
/* Yahoo, a combined copy of the audio! */
return ast_frdup(&frame);
}
-/*! \brief Reads a frame in from the audiohook structure
- * \param audiohook Audiohook structure
- * \param samples Number of samples wanted
- * \param direction Direction the audio frame came from
- * \param format Format of frame remote side wants back
- * \return Returns frame on success, NULL on failure
- */
-struct ast_frame *ast_audiohook_read_frame(struct ast_audiohook *audiohook, size_t samples, enum ast_audiohook_direction direction, format_t format)
+static struct ast_frame *audiohook_read_frame_helper(struct ast_audiohook *audiohook, size_t samples, enum ast_audiohook_direction direction, struct ast_format *format, struct ast_frame **read_reference, struct ast_frame **write_reference)
{
struct ast_frame *read_frame = NULL, *final_frame = NULL;
+ struct ast_format *slin;
+
+ /*
+ * Update the rate if compatibility mode is turned off or if it is
+ * turned on and the format rate is higher than the current rate.
+ *
+ * This makes it so any unnecessary rate switching/resetting does
+ * not take place and also any associated audiohook_list's internal
+ * sample rate maintains the highest sample rate between hooks.
+ */
+ if (!ast_test_flag(audiohook, AST_AUDIOHOOK_COMPATIBLE) ||
+ (ast_test_flag(audiohook, AST_AUDIOHOOK_COMPATIBLE) &&
+ ast_format_get_sample_rate(format) > audiohook->hook_internal_samp_rate)) {
+ audiohook_set_internal_rate(audiohook, ast_format_get_sample_rate(format), 1);
+ }
- if (!(read_frame = (direction == AST_AUDIOHOOK_DIRECTION_BOTH ? audiohook_read_frame_both(audiohook, samples) : audiohook_read_frame_single(audiohook, samples, direction))))
+ /* If the sample rate of the requested format differs from that of the underlying audiohook
+ * sample rate determine how many samples we actually need to get from the audiohook. This
+ * needs to occur as the signed linear factory stores them at the rate of the audiohook.
+ * We do this by determining the duration of audio they've requested and then determining
+ * how many samples that would be in the audiohook format.
+ */
+ if (ast_format_get_sample_rate(format) != audiohook->hook_internal_samp_rate) {
+ samples = (audiohook->hook_internal_samp_rate / 1000) * (samples / (ast_format_get_sample_rate(format) / 1000));
+ }
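+	/* Worked example (illustrative): a caller requesting 160 samples of 8kHz
+	 * slin (20ms) from a hook running internally at 16kHz ends up asking the
+	 * factory for (16000 / 1000) * (160 / (8000 / 1000)) = 16 * 20 = 320 samples. */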
+
+ if (!(read_frame = (direction == AST_AUDIOHOOK_DIRECTION_BOTH ?
+ audiohook_read_frame_both(audiohook, samples, read_reference, write_reference) :
+ audiohook_read_frame_single(audiohook, samples, direction)))) {
return NULL;
+ }
+
+ slin = ast_format_cache_get_slin_by_rate(audiohook->hook_internal_samp_rate);
/* If they don't want signed linear back out, we'll have to send it through the translation path */
- if (format != AST_FORMAT_SLINEAR) {
+ if (ast_format_cmp(format, slin) != AST_FORMAT_CMP_EQUAL) {
/* Rebuild translation path if different format then previously */
- if (audiohook->format != format) {
+ if (ast_format_cmp(format, audiohook->format) == AST_FORMAT_CMP_NOT_EQUAL) {
if (audiohook->trans_pvt) {
ast_translator_free_path(audiohook->trans_pvt);
audiohook->trans_pvt = NULL;
}
+
/* Setup new translation path for this format... if we fail we can't very well return signed linear so free the frame and return nothing */
- if (!(audiohook->trans_pvt = ast_translator_build_path(format, AST_FORMAT_SLINEAR))) {
+ if (!(audiohook->trans_pvt = ast_translator_build_path(format, slin))) {
ast_frfree(read_frame);
return NULL;
}
+ ao2_replace(audiohook->format, format);
}
/* Convert to requested format, and allow the read in frame to be freed */
final_frame = ast_translate(audiohook->trans_pvt, read_frame, 1);
return final_frame;
}
+/*! \brief Reads a frame in from the audiohook structure
+ * \param audiohook Audiohook structure
+ * \param samples Number of samples wanted in requested output format
+ * \param direction Direction the audio frame came from
+ * \param format Format of frame remote side wants back
+ * \return Returns frame on success, NULL on failure
+ */
+struct ast_frame *ast_audiohook_read_frame(struct ast_audiohook *audiohook, size_t samples, enum ast_audiohook_direction direction, struct ast_format *format)
+{
+ return audiohook_read_frame_helper(audiohook, samples, direction, format, NULL, NULL);
+}
+
+/*! \brief Reads a frame in from the audiohook structure
+ * \param audiohook Audiohook structure
+ * \param samples Number of samples wanted
+ * \param direction Direction the audio frame came from
+ * \param format Format of frame remote side wants back
+ * \param read_frame frame pointer for copying read frame data
+ * \param write_frame frame pointer for copying write frame data
+ * \return Returns frame on success, NULL on failure
+ */
+struct ast_frame *ast_audiohook_read_frame_all(struct ast_audiohook *audiohook, size_t samples, struct ast_format *format, struct ast_frame **read_frame, struct ast_frame **write_frame)
+{
+ return audiohook_read_frame_helper(audiohook, samples, AST_AUDIOHOOK_DIRECTION_BOTH, format, read_frame, write_frame);
+}
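+
+/*
+ * Illustrative usage sketch (the audiohook pointer and the consumer of the
+ * frames are assumed to exist elsewhere): request 20ms of 16kHz signed
+ * linear audio, i.e. 320 samples in the requested output format, as a mixed
+ * frame plus the individual read/write legs.  The sketch only shows the
+ * ownership obligations: every frame handed back must be freed by the caller.
+ *
+ * \code
+ * struct ast_frame *mixed;
+ * struct ast_frame *read_ref = NULL;
+ * struct ast_frame *write_ref = NULL;
+ * struct ast_format *slin16 = ast_format_cache_get_slin_by_rate(16000);
+ *
+ * mixed = ast_audiohook_read_frame_all(audiohook, 320, slin16, &read_ref, &write_ref);
+ * if (mixed) {
+ *	ast_frfree(mixed);
+ * }
+ * if (read_ref) {
+ *	ast_frfree(read_ref);
+ * }
+ * if (write_ref) {
+ *	ast_frfree(write_ref);
+ * }
+ * \endcode
+ */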
+
+static void audiohook_list_set_samplerate_compatibility(struct ast_audiohook_list *audiohook_list)
+{
+ struct ast_audiohook *ah = NULL;
+
+ /*
+ * Anytime the samplerate compatibility is set (attach/remove an audiohook) the
+ * list's internal sample rate needs to be reset so that the next time processing
+ * through write_list, if needed, it will get updated to the correct rate.
+ *
+ * A list's internal rate always chooses the higher between its own rate and a
+ * given rate. If the current rate is being driven by an audiohook that wanted a
+ * higher rate then when this audiohook is removed the list's rate would remain
+ * at that level when it should be lower, and with no way to lower it since any
+ * rate compared against it would be lower.
+ *
+	 * By setting it back to the lowest rate it can recalculate the new highest rate.
+ */
+ audiohook_list->list_internal_samp_rate = DEFAULT_INTERNAL_SAMPLE_RATE;
+
+ audiohook_list->native_slin_compatible = 1;
+ AST_LIST_TRAVERSE(&audiohook_list->manipulate_list, ah, list) {
+ if (!(ah->init_flags & AST_AUDIOHOOK_MANIPULATE_ALL_RATES)) {
+ audiohook_list->native_slin_compatible = 0;
+ return;
+ }
+ }
+}
+
/*! \brief Attach audiohook to channel
* \param chan Channel
* \param audiohook Audiohook structure
{
ast_channel_lock(chan);
- if (!chan->audiohooks) {
+ if (!ast_channel_audiohooks(chan)) {
+ struct ast_audiohook_list *ahlist;
/* Whoops... allocate a new structure */
- if (!(chan->audiohooks = ast_calloc(1, sizeof(*chan->audiohooks)))) {
+ if (!(ahlist = ast_calloc(1, sizeof(*ahlist)))) {
ast_channel_unlock(chan);
return -1;
}
- AST_LIST_HEAD_INIT_NOLOCK(&chan->audiohooks->spy_list);
- AST_LIST_HEAD_INIT_NOLOCK(&chan->audiohooks->whisper_list);
- AST_LIST_HEAD_INIT_NOLOCK(&chan->audiohooks->manipulate_list);
+ ast_channel_audiohooks_set(chan, ahlist);
+ AST_LIST_HEAD_INIT_NOLOCK(&ast_channel_audiohooks(chan)->spy_list);
+ AST_LIST_HEAD_INIT_NOLOCK(&ast_channel_audiohooks(chan)->whisper_list);
+ AST_LIST_HEAD_INIT_NOLOCK(&ast_channel_audiohooks(chan)->manipulate_list);
+ /* This sample rate will adjust as necessary when writing to the list. */
+ ast_channel_audiohooks(chan)->list_internal_samp_rate = DEFAULT_INTERNAL_SAMPLE_RATE;
}
/* Drop into respective list */
- if (audiohook->type == AST_AUDIOHOOK_TYPE_SPY)
- AST_LIST_INSERT_TAIL(&chan->audiohooks->spy_list, audiohook, list);
- else if (audiohook->type == AST_AUDIOHOOK_TYPE_WHISPER)
- AST_LIST_INSERT_TAIL(&chan->audiohooks->whisper_list, audiohook, list);
- else if (audiohook->type == AST_AUDIOHOOK_TYPE_MANIPULATE)
- AST_LIST_INSERT_TAIL(&chan->audiohooks->manipulate_list, audiohook, list);
+ if (audiohook->type == AST_AUDIOHOOK_TYPE_SPY) {
+ AST_LIST_INSERT_TAIL(&ast_channel_audiohooks(chan)->spy_list, audiohook, list);
+ } else if (audiohook->type == AST_AUDIOHOOK_TYPE_WHISPER) {
+ AST_LIST_INSERT_TAIL(&ast_channel_audiohooks(chan)->whisper_list, audiohook, list);
+ } else if (audiohook->type == AST_AUDIOHOOK_TYPE_MANIPULATE) {
+ AST_LIST_INSERT_TAIL(&ast_channel_audiohooks(chan)->manipulate_list, audiohook, list);
+ }
+
+ /*
+ * Initialize the audiohook's rate to the default. If it needs to be,
+ * it will get updated later.
+ */
+ audiohook_set_internal_rate(audiohook, DEFAULT_INTERNAL_SAMPLE_RATE, 1);
+ audiohook_list_set_samplerate_compatibility(ast_channel_audiohooks(chan));
/* Change status over to running since it is now attached */
ast_audiohook_update_status(audiohook, AST_AUDIOHOOK_STATUS_RUNNING);
+ if (ast_channel_is_bridged(chan)) {
+ ast_channel_set_unbridged_nolock(chan, 1);
+ }
+
ast_channel_unlock(chan);
return 0;
}
/*! \brief Update audiohook's status
- * \param audiohook status enum
* \param audiohook Audiohook structure
+ * \param status Audiohook status enum
*
* \note once status is updated to DONE, this function can not be used to set the
* status back to any other setting. Setting DONE effectively locks the status as such.
*/
int ast_audiohook_detach(struct ast_audiohook *audiohook)
{
- if (audiohook->status == AST_AUDIOHOOK_STATUS_NEW || audiohook->status == AST_AUDIOHOOK_STATUS_DONE)
+ if (audiohook->status == AST_AUDIOHOOK_STATUS_NEW || audiohook->status == AST_AUDIOHOOK_STATUS_DONE) {
return 0;
+ }
ast_audiohook_update_status(audiohook, AST_AUDIOHOOK_STATUS_SHUTDOWN);
- while (audiohook->status != AST_AUDIOHOOK_STATUS_DONE)
+ while (audiohook->status != AST_AUDIOHOOK_STATUS_DONE) {
ast_audiohook_trigger_wait(audiohook);
+ }
return 0;
}
-/*! \brief Detach audiohooks from list and destroy said list
- * \param audiohook_list List of audiohooks
- * \return Returns 0 on success, -1 on failure
- */
-int ast_audiohook_detach_list(struct ast_audiohook_list *audiohook_list)
+void ast_audiohook_detach_list(struct ast_audiohook_list *audiohook_list)
{
- int i = 0;
- struct ast_audiohook *audiohook = NULL;
+ int i;
+ struct ast_audiohook *audiohook;
+
+ if (!audiohook_list) {
+ return;
+ }
/* Drop any spies */
while ((audiohook = AST_LIST_REMOVE_HEAD(&audiohook_list->spy_list, list))) {
/* Drop translation paths if present */
for (i = 0; i < 2; i++) {
- if (audiohook_list->in_translate[i].trans_pvt)
+ if (audiohook_list->in_translate[i].trans_pvt) {
ast_translator_free_path(audiohook_list->in_translate[i].trans_pvt);
- if (audiohook_list->out_translate[i].trans_pvt)
+ ao2_cleanup(audiohook_list->in_translate[i].format);
+ }
+ if (audiohook_list->out_translate[i].trans_pvt) {
ast_translator_free_path(audiohook_list->out_translate[i].trans_pvt);
+				ao2_cleanup(audiohook_list->out_translate[i].format);
+ }
}
-
+
/* Free ourselves */
ast_free(audiohook_list);
-
- return 0;
}
/*! \brief find an audiohook based on its source
struct ast_audiohook *audiohook = NULL;
AST_LIST_TRAVERSE(&audiohook_list->spy_list, audiohook, list) {
- if (!strcasecmp(audiohook->source, source))
+ if (!strcasecmp(audiohook->source, source)) {
return audiohook;
+ }
}
AST_LIST_TRAVERSE(&audiohook_list->whisper_list, audiohook, list) {
- if (!strcasecmp(audiohook->source, source))
+ if (!strcasecmp(audiohook->source, source)) {
return audiohook;
+ }
}
AST_LIST_TRAVERSE(&audiohook_list->manipulate_list, audiohook, list) {
- if (!strcasecmp(audiohook->source, source))
+ if (!strcasecmp(audiohook->source, source)) {
return audiohook;
+ }
}
return NULL;
}
-void ast_audiohook_move_by_source(struct ast_channel *old_chan, struct ast_channel *new_chan, const char *source)
+static void audiohook_move(struct ast_channel *old_chan, struct ast_channel *new_chan, struct ast_audiohook *audiohook)
{
- struct ast_audiohook *audiohook;
enum ast_audiohook_status oldstatus;
- if (!old_chan->audiohooks || !(audiohook = find_audiohook_by_source(old_chan->audiohooks, source))) {
- return;
- }
-
/* By locking both channels and the audiohook, we can assure that
* another thread will not have a chance to read the audiohook's status
* as done, even though ast_audiohook_remove signals the trigger
ast_audiohook_unlock(audiohook);
}
+void ast_audiohook_move_by_source(struct ast_channel *old_chan, struct ast_channel *new_chan, const char *source)
+{
+ struct ast_audiohook *audiohook;
+
+ if (!ast_channel_audiohooks(old_chan)) {
+ return;
+ }
+
+ audiohook = find_audiohook_by_source(ast_channel_audiohooks(old_chan), source);
+ if (!audiohook) {
+ return;
+ }
+
+ audiohook_move(old_chan, new_chan, audiohook);
+}
+
+void ast_audiohook_move_all(struct ast_channel *old_chan, struct ast_channel *new_chan)
+{
+ struct ast_audiohook *audiohook;
+ struct ast_audiohook_list *audiohook_list;
+
+ audiohook_list = ast_channel_audiohooks(old_chan);
+ if (!audiohook_list) {
+ return;
+ }
+
+ AST_LIST_TRAVERSE_SAFE_BEGIN(&audiohook_list->spy_list, audiohook, list) {
+ audiohook_move(old_chan, new_chan, audiohook);
+ }
+ AST_LIST_TRAVERSE_SAFE_END;
+
+ AST_LIST_TRAVERSE_SAFE_BEGIN(&audiohook_list->whisper_list, audiohook, list) {
+ audiohook_move(old_chan, new_chan, audiohook);
+ }
+ AST_LIST_TRAVERSE_SAFE_END;
+
+ AST_LIST_TRAVERSE_SAFE_BEGIN(&audiohook_list->manipulate_list, audiohook, list) {
+ audiohook_move(old_chan, new_chan, audiohook);
+ }
+ AST_LIST_TRAVERSE_SAFE_END;
+}
+
/*! \brief Detach specified source audiohook from channel
* \param chan Channel to detach from
* \param source Name of source to detach
ast_channel_lock(chan);
/* Ensure the channel has audiohooks on it */
- if (!chan->audiohooks) {
+ if (!ast_channel_audiohooks(chan)) {
ast_channel_unlock(chan);
return -1;
}
- audiohook = find_audiohook_by_source(chan->audiohooks, source);
+ audiohook = find_audiohook_by_source(ast_channel_audiohooks(chan), source);
ast_channel_unlock(chan);
- if (audiohook && audiohook->status != AST_AUDIOHOOK_STATUS_DONE)
+ if (audiohook && audiohook->status != AST_AUDIOHOOK_STATUS_DONE) {
ast_audiohook_update_status(audiohook, AST_AUDIOHOOK_STATUS_SHUTDOWN);
+ }
return (audiohook ? 0 : -1);
}
{
ast_channel_lock(chan);
- if (!chan->audiohooks) {
+ if (!ast_channel_audiohooks(chan)) {
ast_channel_unlock(chan);
return -1;
}
- if (audiohook->type == AST_AUDIOHOOK_TYPE_SPY)
- AST_LIST_REMOVE(&chan->audiohooks->spy_list, audiohook, list);
- else if (audiohook->type == AST_AUDIOHOOK_TYPE_WHISPER)
- AST_LIST_REMOVE(&chan->audiohooks->whisper_list, audiohook, list);
- else if (audiohook->type == AST_AUDIOHOOK_TYPE_MANIPULATE)
- AST_LIST_REMOVE(&chan->audiohooks->manipulate_list, audiohook, list);
+ if (audiohook->type == AST_AUDIOHOOK_TYPE_SPY) {
+ AST_LIST_REMOVE(&ast_channel_audiohooks(chan)->spy_list, audiohook, list);
+ } else if (audiohook->type == AST_AUDIOHOOK_TYPE_WHISPER) {
+ AST_LIST_REMOVE(&ast_channel_audiohooks(chan)->whisper_list, audiohook, list);
+ } else if (audiohook->type == AST_AUDIOHOOK_TYPE_MANIPULATE) {
+ AST_LIST_REMOVE(&ast_channel_audiohooks(chan)->manipulate_list, audiohook, list);
+ }
+ audiohook_list_set_samplerate_compatibility(ast_channel_audiohooks(chan));
ast_audiohook_update_status(audiohook, AST_AUDIOHOOK_STATUS_DONE);
+ if (ast_channel_is_bridged(chan)) {
+ ast_channel_set_unbridged_nolock(chan, 1);
+ }
+
ast_channel_unlock(chan);
return 0;
static struct ast_frame *dtmf_audiohook_write_list(struct ast_channel *chan, struct ast_audiohook_list *audiohook_list, enum ast_audiohook_direction direction, struct ast_frame *frame)
{
struct ast_audiohook *audiohook = NULL;
+ int removed = 0;
AST_LIST_TRAVERSE_SAFE_BEGIN(&audiohook_list->manipulate_list, audiohook, list) {
ast_audiohook_lock(audiohook);
if (audiohook->status != AST_AUDIOHOOK_STATUS_RUNNING) {
AST_LIST_REMOVE_CURRENT(list);
+ removed = 1;
ast_audiohook_update_status(audiohook, AST_AUDIOHOOK_STATUS_DONE);
ast_audiohook_unlock(audiohook);
audiohook->manipulate_callback(audiohook, NULL, NULL, 0);
+ if (ast_channel_is_bridged(chan)) {
+ ast_channel_set_unbridged_nolock(chan, 1);
+ }
continue;
}
- if (ast_test_flag(audiohook, AST_AUDIOHOOK_WANTS_DTMF))
+ if (ast_test_flag(audiohook, AST_AUDIOHOOK_WANTS_DTMF)) {
audiohook->manipulate_callback(audiohook, chan, frame, direction);
+ }
ast_audiohook_unlock(audiohook);
}
AST_LIST_TRAVERSE_SAFE_END;
+ /* if an audiohook got removed, reset samplerate compatibility */
+ if (removed) {
+ audiohook_list_set_samplerate_compatibility(audiohook_list);
+ }
return frame;
}
-/*! \brief Pass an AUDIO frame off to be handled by the audiohook core
+static struct ast_frame *audiohook_list_translate_to_slin(struct ast_audiohook_list *audiohook_list,
+ enum ast_audiohook_direction direction, struct ast_frame *frame)
+{
+ struct ast_audiohook_translate *in_translate = (direction == AST_AUDIOHOOK_DIRECTION_READ ?
+ &audiohook_list->in_translate[0] : &audiohook_list->in_translate[1]);
+ struct ast_frame *new_frame = frame;
+ struct ast_format *slin;
+
+ /*
+	 * If we are capable of sample rates other than 8kHz, update the internal
+	 * audiohook_list's rate when higher sample rate audio arrives. If native
+ * slin compatibility is turned on all audiohooks in the list will be
+ * updated as well during read/write processing.
+ */
+ audiohook_list->list_internal_samp_rate =
+ MAX(ast_format_get_sample_rate(frame->subclass.format), audiohook_list->list_internal_samp_rate);
+
+ slin = ast_format_cache_get_slin_by_rate(audiohook_list->list_internal_samp_rate);
+ if (ast_format_cmp(frame->subclass.format, slin) == AST_FORMAT_CMP_EQUAL) {
+ return new_frame;
+ }
+
+ if (!in_translate->format ||
+ ast_format_cmp(frame->subclass.format, in_translate->format) != AST_FORMAT_CMP_EQUAL) {
+ struct ast_trans_pvt *new_trans;
+
+ new_trans = ast_translator_build_path(slin, frame->subclass.format);
+ if (!new_trans) {
+ return NULL;
+ }
+
+ if (in_translate->trans_pvt) {
+ ast_translator_free_path(in_translate->trans_pvt);
+ }
+ in_translate->trans_pvt = new_trans;
+
+ ao2_replace(in_translate->format, frame->subclass.format);
+ }
+
+ if (!(new_frame = ast_translate(in_translate->trans_pvt, frame, 0))) {
+ return NULL;
+ }
+
+ return new_frame;
+}
+
+static struct ast_frame *audiohook_list_translate_to_native(struct ast_audiohook_list *audiohook_list,
+ enum ast_audiohook_direction direction, struct ast_frame *slin_frame, struct ast_format *outformat)
+{
+ struct ast_audiohook_translate *out_translate = (direction == AST_AUDIOHOOK_DIRECTION_READ ? &audiohook_list->out_translate[0] : &audiohook_list->out_translate[1]);
+ struct ast_frame *outframe = NULL;
+ if (ast_format_cmp(slin_frame->subclass.format, outformat) == AST_FORMAT_CMP_NOT_EQUAL) {
+ /* rebuild translators if necessary */
+ if (ast_format_cmp(out_translate->format, outformat) == AST_FORMAT_CMP_NOT_EQUAL) {
+ if (out_translate->trans_pvt) {
+ ast_translator_free_path(out_translate->trans_pvt);
+ }
+ if (!(out_translate->trans_pvt = ast_translator_build_path(outformat, slin_frame->subclass.format))) {
+ return NULL;
+ }
+ ao2_replace(out_translate->format, outformat);
+ }
+ /* translate back to the format the frame came in as. */
+ if (!(outframe = ast_translate(out_translate->trans_pvt, slin_frame, 0))) {
+ return NULL;
+ }
+ }
+ return outframe;
+}
+
+/*!
+ * \brief Set the audiohook's internal sample rate to the audiohook_list's rate,
+ * but only when native slin compatibility is turned on.
+ *
+ * \param audiohook_list audiohook_list data object
+ * \param audiohook the audiohook to update
+ * \param rate the current max internal sample rate
+ */
+static void audiohook_list_set_hook_rate(struct ast_audiohook_list *audiohook_list,
+ struct ast_audiohook *audiohook, int *rate)
+{
+ /* The rate should always be the max between itself and the hook */
+ if (audiohook->hook_internal_samp_rate > *rate) {
+ *rate = audiohook->hook_internal_samp_rate;
+ }
+
+ /*
+ * If native slin compatibility is turned on then update the audiohook
+ * with the audiohook_list's current rate. Note, the audiohook's rate is
+ * set to the audiohook_list's rate and not the given rate. If there is
+ * a change in rate the hook's rate is changed on its next check.
+ */
+ if (audiohook_list->native_slin_compatible) {
+ ast_set_flag(audiohook, AST_AUDIOHOOK_COMPATIBLE);
+ audiohook_set_internal_rate(audiohook, audiohook_list->list_internal_samp_rate, 1);
+ } else {
+ ast_clear_flag(audiohook, AST_AUDIOHOOK_COMPATIBLE);
+ }
+}
+
+/*!
+ * \brief Pass an AUDIO frame off to be handled by the audiohook core
+ *
+ * \details
+ * This function has 3 ast_frames and 3 parts to handle each. At the beginning of this
+ * function all 3 frames, start_frame, middle_frame, and end_frame point to the initial
+ * input frame.
+ *
+ * Part_1: Translate the start_frame into SLINEAR audio if it is not already in that
+ * format. The result of this part is middle_frame is guaranteed to be in
+ * SLINEAR format for Part_2.
+ * Part_2: Send middle_frame off to spies and manipulators. At this point middle_frame is
+ * either a new frame as result of the translation, or points directly to the start_frame
+ * because no translation to SLINEAR audio was required.
+ * Part_3: Translate end_frame's audio back into the format of start frame if necessary. This
+ * is only necessary if manipulation of middle_frame occurred.
+ *
* \param chan Channel that the list is coming off of
* \param audiohook_list List of audiohooks
* \param direction Direction frame is coming in from
*/
static struct ast_frame *audio_audiohook_write_list(struct ast_channel *chan, struct ast_audiohook_list *audiohook_list, enum ast_audiohook_direction direction, struct ast_frame *frame)
{
- struct ast_audiohook_translate *in_translate = (direction == AST_AUDIOHOOK_DIRECTION_READ ? &audiohook_list->in_translate[0] : &audiohook_list->in_translate[1]);
- struct ast_audiohook_translate *out_translate = (direction == AST_AUDIOHOOK_DIRECTION_READ ? &audiohook_list->out_translate[0] : &audiohook_list->out_translate[1]);
struct ast_frame *start_frame = frame, *middle_frame = frame, *end_frame = frame;
struct ast_audiohook *audiohook = NULL;
- int samples = frame->samples;
-
- /* If the frame coming in is not signed linear we have to send it through the in_translate path */
- if (frame->subclass.codec != AST_FORMAT_SLINEAR) {
- if (in_translate->format != frame->subclass.codec) {
- if (in_translate->trans_pvt)
- ast_translator_free_path(in_translate->trans_pvt);
- if (!(in_translate->trans_pvt = ast_translator_build_path(AST_FORMAT_SLINEAR, frame->subclass.codec)))
- return frame;
- in_translate->format = frame->subclass.codec;
+ int samples;
+ int middle_frame_manipulated = 0;
+ int removed = 0;
+ int internal_sample_rate;
+
+ /* ---Part_1. translate start_frame to SLINEAR if necessary. */
+ if (!(middle_frame = audiohook_list_translate_to_slin(audiohook_list, direction, start_frame))) {
+ return frame;
+ }
+
+ /* If the translation resulted in an interpolated frame then immediately return as audiohooks
+ * rely on actual media being present to do things.
+ */
+ if (!middle_frame->data.ptr) {
+ if (middle_frame != start_frame) {
+ ast_frfree(middle_frame);
}
- if (!(middle_frame = ast_translate(in_translate->trans_pvt, frame, 0)))
- return frame;
- samples = middle_frame->samples;
+ return start_frame;
}
+ samples = middle_frame->samples;
+
+ /*
+ * While processing each audiohook check to see if the internal sample rate needs
+ * to be adjusted (it should be the highest rate specified between formats and
+ * hooks). The given audiohook_list's internal sample rate is then set to the
+ * updated value before returning.
+ *
+ * If slin compatibility mode is turned on then an audiohook's internal sample
+ * rate is set to its audiohook_list's rate. If an audiohook_list's rate is
+ * adjusted during this pass then the change is picked up by the audiohooks
+ * on the next pass.
+ */
+ internal_sample_rate = audiohook_list->list_internal_samp_rate;
+
+ /* ---Part_2: Send middle_frame to spy and manipulator lists. middle_frame is guaranteed to be SLINEAR here.*/
/* Queue up signed linear frame to each spy */
AST_LIST_TRAVERSE_SAFE_BEGIN(&audiohook_list->spy_list, audiohook, list) {
ast_audiohook_lock(audiohook);
if (audiohook->status != AST_AUDIOHOOK_STATUS_RUNNING) {
AST_LIST_REMOVE_CURRENT(list);
+ removed = 1;
ast_audiohook_update_status(audiohook, AST_AUDIOHOOK_STATUS_DONE);
ast_audiohook_unlock(audiohook);
+ if (ast_channel_is_bridged(chan)) {
+ ast_channel_set_unbridged_nolock(chan, 1);
+ }
continue;
}
+ audiohook_list_set_hook_rate(audiohook_list, audiohook, &internal_sample_rate);
ast_audiohook_write_frame(audiohook, direction, middle_frame);
ast_audiohook_unlock(audiohook);
}
AST_LIST_TRAVERSE_SAFE_END;
	/* Mix in audio from any whisper sources, using the factory that matches the direction of this frame */
- if (direction == AST_AUDIOHOOK_DIRECTION_WRITE && !AST_LIST_EMPTY(&audiohook_list->whisper_list)) {
+ if (!AST_LIST_EMPTY(&audiohook_list->whisper_list)) {
int i = 0;
short read_buf[samples], combine_buf[samples], *data1 = NULL, *data2 = NULL;
memset(&combine_buf, 0, sizeof(combine_buf));
AST_LIST_TRAVERSE_SAFE_BEGIN(&audiohook_list->whisper_list, audiohook, list) {
+ struct ast_slinfactory *factory = (direction == AST_AUDIOHOOK_DIRECTION_READ ? &audiohook->read_factory : &audiohook->write_factory);
ast_audiohook_lock(audiohook);
if (audiohook->status != AST_AUDIOHOOK_STATUS_RUNNING) {
AST_LIST_REMOVE_CURRENT(list);
+ removed = 1;
ast_audiohook_update_status(audiohook, AST_AUDIOHOOK_STATUS_DONE);
ast_audiohook_unlock(audiohook);
+ if (ast_channel_is_bridged(chan)) {
+ ast_channel_set_unbridged_nolock(chan, 1);
+ }
continue;
}
- if (ast_slinfactory_available(&audiohook->write_factory) >= samples && ast_slinfactory_read(&audiohook->write_factory, read_buf, samples)) {
+ audiohook_list_set_hook_rate(audiohook_list, audiohook, &internal_sample_rate);
+ if (ast_slinfactory_available(factory) >= samples && ast_slinfactory_read(factory, read_buf, samples)) {
/* Take audio from this whisper source and combine it into our main buffer */
- for (i = 0, data1 = combine_buf, data2 = read_buf; i < samples; i++, data1++, data2++)
+ for (i = 0, data1 = combine_buf, data2 = read_buf; i < samples; i++, data1++, data2++) {
ast_slinear_saturated_add(data1, data2);
+ }
}
ast_audiohook_unlock(audiohook);
}
AST_LIST_TRAVERSE_SAFE_END;
/* We take all of the combined whisper sources and combine them into the audio being written out */
- for (i = 0, data1 = middle_frame->data.ptr, data2 = combine_buf; i < samples; i++, data1++, data2++)
+ for (i = 0, data1 = middle_frame->data.ptr, data2 = combine_buf; i < samples; i++, data1++, data2++) {
ast_slinear_saturated_add(data1, data2);
- end_frame = middle_frame;
+ }
+ middle_frame_manipulated = 1;
}
/* Pass off frame to manipulate audiohooks */
ast_audiohook_lock(audiohook);
if (audiohook->status != AST_AUDIOHOOK_STATUS_RUNNING) {
AST_LIST_REMOVE_CURRENT(list);
+ removed = 1;
ast_audiohook_update_status(audiohook, AST_AUDIOHOOK_STATUS_DONE);
ast_audiohook_unlock(audiohook);
			/* We basically drop all of our links to the manipulate audiohook and prod it to do its own destructive things */
audiohook->manipulate_callback(audiohook, chan, NULL, direction);
+ if (ast_channel_is_bridged(chan)) {
+ ast_channel_set_unbridged_nolock(chan, 1);
+ }
continue;
}
- /* Feed in frame to manipulation */
- if (audiohook->manipulate_callback(audiohook, chan, middle_frame, direction)) {
- ast_frfree(middle_frame);
- middle_frame = NULL;
+ audiohook_list_set_hook_rate(audiohook_list, audiohook, &internal_sample_rate);
+		/* Feed in frame to manipulation. */
+ if (!audiohook->manipulate_callback(audiohook, chan, middle_frame, direction)) {
+ /*
+ * XXX FAILURES ARE IGNORED XXX
+ * If the manipulation fails then the frame will be returned in its original state.
+ * Since there are potentially more manipulator callbacks in the list, no action should
+ * be taken here to exit early.
+ */
+ middle_frame_manipulated = 1;
}
ast_audiohook_unlock(audiohook);
}
AST_LIST_TRAVERSE_SAFE_END;
- if (middle_frame) {
- end_frame = middle_frame;
- }
}
- /* Now we figure out what to do with our end frame (whether to transcode or not) */
- if (middle_frame == end_frame) {
- /* Middle frame was modified and became the end frame... let's see if we need to transcode */
- if (end_frame->subclass.codec != start_frame->subclass.codec) {
- if (out_translate->format != start_frame->subclass.codec) {
- if (out_translate->trans_pvt)
- ast_translator_free_path(out_translate->trans_pvt);
- if (!(out_translate->trans_pvt = ast_translator_build_path(start_frame->subclass.codec, AST_FORMAT_SLINEAR))) {
- /* We can't transcode this... drop our middle frame and return the original */
- ast_frfree(middle_frame);
- return start_frame;
- }
- out_translate->format = start_frame->subclass.codec;
- }
- /* Transcode from our middle (signed linear) frame to new format of the frame that came in */
- if (!(end_frame = ast_translate(out_translate->trans_pvt, middle_frame, 0))) {
- /* Failed to transcode the frame... drop it and return the original */
- ast_frfree(middle_frame);
- return start_frame;
- }
- /* Here's the scoop... middle frame is no longer of use to us */
- ast_frfree(middle_frame);
+ /* ---Part_3: Decide what to do with the end_frame (whether to transcode or not) */
+ if (middle_frame_manipulated) {
+ if (!(end_frame = audiohook_list_translate_to_native(audiohook_list, direction, middle_frame, start_frame->subclass.format))) {
+ /* translation failed, so just pass back the input frame */
+ end_frame = start_frame;
}
} else {
- /* No frame was modified, we can just drop our middle frame and pass the frame we got in out */
- if (middle_frame) {
- ast_frfree(middle_frame);
- }
+ end_frame = start_frame;
+ }
+ /* clean up our middle_frame if required */
+ if (middle_frame != end_frame) {
+ ast_frfree(middle_frame);
+ middle_frame = NULL;
+ }
+
+ /* Before returning, if an audiohook got removed, reset samplerate compatibility */
+ if (removed) {
+ audiohook_list_set_samplerate_compatibility(audiohook_list);
+ } else {
+ /*
+ * Set the audiohook_list's rate to the updated rate. Note that if a hook
+ * was removed then the list's internal rate is reset to the default.
+ */
+ audiohook_list->list_internal_samp_rate = internal_sample_rate;
}
return end_frame;
}
+int ast_audiohook_write_list_empty(struct ast_audiohook_list *audiohook_list)
+{
+ return !audiohook_list
+ || (AST_LIST_EMPTY(&audiohook_list->spy_list)
+ && AST_LIST_EMPTY(&audiohook_list->whisper_list)
+ && AST_LIST_EMPTY(&audiohook_list->manipulate_list));
+}
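+
+/*
+ * Illustrative caller-side sketch (assumes chan is locked by the caller and
+ * frame is a voice frame): the emptiness check lets a frame path bypass
+ * audiohook processing entirely when nothing is attached.  Note that the
+ * frame returned by ast_audiohook_write_list() may differ from the one
+ * passed in.
+ *
+ * \code
+ * struct ast_frame *out = frame;
+ *
+ * if (!ast_audiohook_write_list_empty(ast_channel_audiohooks(chan))) {
+ *	out = ast_audiohook_write_list(chan, ast_channel_audiohooks(chan),
+ *		AST_AUDIOHOOK_DIRECTION_READ, frame);
+ * }
+ * \endcode
+ */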
+
/*! \brief Pass a frame off to be handled by the audiohook core
* \param chan Channel that the list is coming off of
* \param audiohook_list List of audiohooks
struct ast_frame *ast_audiohook_write_list(struct ast_channel *chan, struct ast_audiohook_list *audiohook_list, enum ast_audiohook_direction direction, struct ast_frame *frame)
{
	/* Pass off frame to its respective list write function */
- if (frame->frametype == AST_FRAME_VOICE)
+ if (frame->frametype == AST_FRAME_VOICE) {
return audio_audiohook_write_list(chan, audiohook_list, direction, frame);
- else if (frame->frametype == AST_FRAME_DTMF)
+ } else if (frame->frametype == AST_FRAME_DTMF) {
return dtmf_audiohook_write_list(chan, audiohook_list, direction, frame);
- else
+ } else {
return frame;
+ }
}
-
/*! \brief Wait for audiohook trigger to be triggered
* \param audiohook Audiohook to wait on
wait = ast_tvadd(ast_tvnow(), ast_samp2tv(50000, 1000));
ts.tv_sec = wait.tv_sec;
ts.tv_nsec = wait.tv_usec * 1000;
-
+
ast_cond_timedwait(&audiohook->trigger, &audiohook->lock, &ts);
-
+
return;
}
int count = 0;
struct ast_audiohook *ah = NULL;
- if (!chan->audiohooks)
+ if (!ast_channel_audiohooks(chan)) {
return -1;
+ }
switch (type) {
case AST_AUDIOHOOK_TYPE_SPY:
- AST_LIST_TRAVERSE_SAFE_BEGIN(&chan->audiohooks->spy_list, ah, list) {
+ AST_LIST_TRAVERSE(&ast_channel_audiohooks(chan)->spy_list, ah, list) {
if (!strcmp(ah->source, source)) {
count++;
}
}
- AST_LIST_TRAVERSE_SAFE_END;
break;
case AST_AUDIOHOOK_TYPE_WHISPER:
- AST_LIST_TRAVERSE_SAFE_BEGIN(&chan->audiohooks->whisper_list, ah, list) {
+ AST_LIST_TRAVERSE(&ast_channel_audiohooks(chan)->whisper_list, ah, list) {
if (!strcmp(ah->source, source)) {
count++;
}
}
- AST_LIST_TRAVERSE_SAFE_END;
break;
case AST_AUDIOHOOK_TYPE_MANIPULATE:
- AST_LIST_TRAVERSE_SAFE_BEGIN(&chan->audiohooks->manipulate_list, ah, list) {
+ AST_LIST_TRAVERSE(&ast_channel_audiohooks(chan)->manipulate_list, ah, list) {
if (!strcmp(ah->source, source)) {
count++;
}
}
- AST_LIST_TRAVERSE_SAFE_END;
break;
default:
- ast_log(LOG_DEBUG, "Invalid audiohook type supplied, (%d)\n", type);
+ ast_debug(1, "Invalid audiohook type supplied, (%u)\n", type);
return -1;
}
{
int count = 0;
struct ast_audiohook *ah = NULL;
- if (!chan->audiohooks)
+ if (!ast_channel_audiohooks(chan))
return -1;
switch (type) {
case AST_AUDIOHOOK_TYPE_SPY:
- AST_LIST_TRAVERSE_SAFE_BEGIN(&chan->audiohooks->spy_list, ah, list) {
+ AST_LIST_TRAVERSE(&ast_channel_audiohooks(chan)->spy_list, ah, list) {
if ((!strcmp(ah->source, source)) && (ah->status == AST_AUDIOHOOK_STATUS_RUNNING))
count++;
}
- AST_LIST_TRAVERSE_SAFE_END;
break;
case AST_AUDIOHOOK_TYPE_WHISPER:
- AST_LIST_TRAVERSE_SAFE_BEGIN(&chan->audiohooks->whisper_list, ah, list) {
+ AST_LIST_TRAVERSE(&ast_channel_audiohooks(chan)->whisper_list, ah, list) {
if ((!strcmp(ah->source, source)) && (ah->status == AST_AUDIOHOOK_STATUS_RUNNING))
count++;
}
- AST_LIST_TRAVERSE_SAFE_END;
break;
case AST_AUDIOHOOK_TYPE_MANIPULATE:
- AST_LIST_TRAVERSE_SAFE_BEGIN(&chan->audiohooks->manipulate_list, ah, list) {
+ AST_LIST_TRAVERSE(&ast_channel_audiohooks(chan)->manipulate_list, ah, list) {
if ((!strcmp(ah->source, source)) && (ah->status == AST_AUDIOHOOK_STATUS_RUNNING))
count++;
}
- AST_LIST_TRAVERSE_SAFE_END;
break;
default:
- ast_log(LOG_DEBUG, "Invalid audiohook type supplied, (%d)\n", type);
+ ast_debug(1, "Invalid audiohook type supplied, (%u)\n", type);
return -1;
}
return count;
}
/* Setup our audiohook structure so we can manipulate the audio */
- ast_audiohook_init(&audiohook_volume->audiohook, AST_AUDIOHOOK_TYPE_MANIPULATE, "Volume");
+ ast_audiohook_init(&audiohook_volume->audiohook, AST_AUDIOHOOK_TYPE_MANIPULATE, "Volume", AST_AUDIOHOOK_MANIPULATE_ALL_RATES);
audiohook_volume->audiohook.manipulate_callback = audiohook_volume_callback;
/* Attach the audiohook_volume blob to the datastore and attach to the channel */
return 0;
}
+
+/*! \brief Mute frames read from or written to a channel
+ * \param chan Channel the audiohook is attached to
+ * \param source Source name of the audiohook to flag
+ * \param flag Which mute flag(s) to set or clear
+ * \param clear If non-zero clear the flag, otherwise set it
+ * \return Returns 0 on success, -1 on failure
+ */
+int ast_audiohook_set_mute(struct ast_channel *chan, const char *source, enum ast_audiohook_flags flag, int clear)
+{
+ struct ast_audiohook *audiohook = NULL;
+
+ ast_channel_lock(chan);
+
+ /* Ensure the channel has audiohooks on it */
+ if (!ast_channel_audiohooks(chan)) {
+ ast_channel_unlock(chan);
+ return -1;
+ }
+
+ audiohook = find_audiohook_by_source(ast_channel_audiohooks(chan), source);
+
+ if (audiohook) {
+ if (clear) {
+ ast_clear_flag(audiohook, flag);
+ } else {
+ ast_set_flag(audiohook, flag);
+ }
+ }
+
+ ast_channel_unlock(chan);
+
+ return (audiohook ? 0 : -1);
+}
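+
+/*
+ * Illustrative usage sketch ("Chanspy" is just an example source name; use
+ * whatever source the audiohook was initialized with): mute the audio the
+ * hook captures in the read direction, then un-mute it later by passing a
+ * non-zero clear argument.
+ *
+ * \code
+ * ast_audiohook_set_mute(chan, "Chanspy", AST_AUDIOHOOK_MUTE_READ, 0);
+ *
+ * ast_audiohook_set_mute(chan, "Chanspy", AST_AUDIOHOOK_MUTE_READ, 1);
+ * \endcode
+ */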