2 * Experimental support for video sessions. We use SDL for rendering, ffmpeg
3 * as the codec library for encoding and decoding, and Video4Linux and X11
4 * to generate the local video stream.
6 * If one of these pieces is not available, either at compile time or at
7 * runtime, we do our best to run without it. Of course, no codec library
8 * means we can only deal with raw data, no SDL means we cannot do rendering,
9 * no V4L or X11 means we cannot generate data (but in principle we could
10 * stream from or record to a file).
12 * We need a recent (2007.07.12 or newer) version of ffmpeg to avoid warnings.
13 * Older versions might give 'deprecated' messages during compilation,
14 * thus not compiling in AST_DEVMODE, or don't have swscale, in which case
15 * you can try to compile #defining OLD_FFMPEG here.
20 //#define DROP_PACKETS 5 /* if set, drop this % of video packets */
21 //#define OLD_FFMPEG 1 /* set for old ffmpeg with no swscale */
24 #include <sys/ioctl.h>
25 #include <math.h> /* sqrt */
26 #include "asterisk/cli.h"
27 #include "asterisk/file.h"
28 #include "asterisk/channel.h"
30 #include "console_video.h"
33 The code is structured as follows.
35 When a new console channel is created, we call console_video_start()
36 to initialize SDL, the source, and the encoder/ decoder for the
37 formats in use (XXX the latter two should be done later, once the
38 codec negotiation is complete). Also, a thread is created to handle
39 the video source and generate frames.
41 While communication is on, the local source is generated by the
42 video thread, which wakes up periodically, generates frames and
43 enqueues them in chan->readq. Incoming rtp frames are passed to
44 console_write_video(), decoded and passed to SDL for display.
46 For as unfortunate and confusing as it can be, we need to deal with a
47 number of different video representations (size, codec/pixel format,
48 codec parameters), as follows:
50 loc_src is the data coming from the camera/X11/etc.
51 The format is typically constrained by the video source.
53 enc_in is the input required by the encoder.
54 Typically constrained in size by the encoder type.
56 enc_out is the bitstream transmitted over RTP.
57 Typically negotiated while the call is established.
59 loc_dpy is the format used to display the local video source.
60 Depending on user preferences this can have the same size as
61 loc_src_fmt, or enc_in_fmt, or thumbnail size (e.g. PiP output)
63 dec_in is the incoming RTP bitstream. Negotiated
64 during call establishment, it is not necessarily the same as
67 dec_out the output of the decoder.
68 The format is whatever the other side sends, and the
69 buffer is allocated by avcodec_decode_... so we only
72 rem_dpy the format used to display the remote stream
74 We store the format info together with the buffer storing the data.
75 As a future optimization, a format/buffer may reference another one
76 if the formats are equivalent. This will save some unnecessary format
80 In order to handle video you need to add to sip.conf (and presumably
81 iax.conf too) the following:
85 allow=h263 ; this or other video formats
86 allow=h263p ; this or other video formats
91 * Codecs are absolutely necessary or we cannot do anything.
92 * In principle SDL is optional too (used for rendering only, but we
93  * could still source data without it), however at the moment it is required.
95 #if !defined(HAVE_VIDEO_CONSOLE) || !defined(HAVE_FFMPEG) || !defined(HAVE_SDL)
96 /* stubs if required pieces are missing */
97 int console_write_video(struct ast_channel *chan, struct ast_frame *f)
99 return 0; /* writing video not supported */
102 int console_video_cli(struct video_desc *env, const char *var, int fd)
104 return 1; /* nothing matched */
107 int console_video_config(struct video_desc **penv, const char *var, const char *val)
109 return 1; /* no configuration */
112 void console_video_start(struct video_desc *env, struct ast_channel *owner)
114 ast_log(LOG_WARNING, "console video support not present\n");
117 void console_video_uninit(struct video_desc *env)
121 int console_video_formats = 0;
123 #else /* defined(HAVE_FFMPEG) && defined(HAVE_SDL) */
125 /*! The list of video formats we support. */
/* Bitmask of AST_FORMAT_* video codecs this console can negotiate;
 * the real codec glue for each lives further down in this file. */
126 int console_video_formats =
127 	AST_FORMAT_H263_PLUS | AST_FORMAT_H263 |
128 	AST_FORMAT_MP4_VIDEO | AST_FORMAT_H264 | AST_FORMAT_H261 ;
131 #include <X11/Xlib.h> /* this should be conditional */
134 #include <ffmpeg/avcodec.h>
136 #include <ffmpeg/swscale.h> /* requires a recent ffmpeg */
140 #ifdef HAVE_SDL_IMAGE
141 #include <SDL/SDL_image.h> /* for loading images */
144 #include <SDL/SDL_ttf.h> /* render text on sdl surfaces */
148 * In many places we use buffers to store the raw frames (but not only),
149 * so here is a structure to keep all the info. data = NULL means the
150 * structure is not initialized, so the other fields are invalid.
151 * size = 0 means the buffer is not malloc'ed so we don't have to free it.
/* Generic frame buffer: raw bytes plus bookkeeping. data == NULL means
 * uninitialized; size == 0 means 'data' is a borrowed reference that we
 * must not free (see fbuf_free()). */
153 struct fbuf_t { /* frame buffers, dynamically allocated */
154 	uint8_t *data; /* memory, malloced if size > 0, just reference
156 	int size; /* total size in bytes */
157 	int used; /* space used so far */
158 	int ebit; /* bits to ignore at the end */
159 	int x; /* origin, if necessary */
166 struct video_codec_desc; /* forward declaration */
168 * Descriptor of the local source, made of the following pieces:
169 * + configuration info (geometry, device name, fps...). These are read
170 * from the config file and copied here before calling video_out_init();
171 * + the frame buffer (buf) and source pixel format, allocated at init time;
172 * + the encoding and RTP info, including timestamps to generate
173 * frames at the correct rate;
174 * + source-specific info, i.e. fd for /dev/video, dpy-image for x11, etc,
175 * filled in by video_open
176 * NOTE: loc_src.data == NULL means the rest of the struct is invalid, and
177 * the video source is not available.
/* Descriptor of the outgoing (local) video path: config copied from the
 * parent, the three staging buffers (camera -> encoder -> RTP), the
 * ffmpeg encoder state, and per-source handles (webcam fd or X11 display). */
179 struct video_out_desc {
180 	/* video device support.
181 	 * videodevice and geometry are read from the config file.
182 	 * At the right time we try to open it and allocate a buffer.
183 	 * If we are successful, webcam_bufsize > 0 and we can read.
185 	/* all the following is config file info copied from the parent */
186 	char videodevice[64];
193 	struct fbuf_t loc_src; /* local source buffer, allocated in video_open() */
194 	struct fbuf_t enc_in; /* encoder input buffer, allocated in video_out_init() */
195 	struct fbuf_t enc_out; /* encoder output buffer, allocated in video_out_init() */
196 	struct fbuf_t loc_dpy; /* display source buffer, no buffer (managed by SDL in bmp[1]) */
197 	struct fbuf_t keypad_dpy; /* keypad source buffer, XXX */
199 	struct video_codec_desc *enc; /* encoder */
200 	AVCodecContext *enc_ctx; /* encoding context */
202 	AVFrame *frame; /* The initial part is an AVPicture */
204 	struct timeval last_frame; /* when we read the last frame ? */
206 	/* device specific info */
207 	int fd; /* file descriptor, for webcam */
209 	Display *dpy; /* x11 grabber info */
211 	int screen_width; /* width of X screen */
212 	int screen_height; /* height of X screen */
217 * Descriptor for the incoming stream, with a buffer for the bitstream
218 * extracted by the RTP packets, RTP reassembly info, and a frame buffer
219 * for the decoded frame (buf).
220 * and store the result in a suitable frame buffer for later display.
221 * NOTE: dec_ctx == NULL means the rest is invalid (e.g. because no
222 * codec, no memory, etc.) and we must drop all incoming frames.
224 * Incoming payload is stored in one of the dec_in[] buffers, which are
225 * emptied by the video thread. These buffers are organized in a circular
226 * queue, with dec_in_cur being the buffer in use by the incoming stream,
227 * and dec_in_dpy is the one being displayed. When the pointers need to
228 * be changed, we synchronize the access to them with dec_in_lock.
229 * When the list is full dec_in_cur = NULL (we cannot store new data),
230 * when the list is empty dec_in_dpy is NULL (we cannot display frames).
/* Descriptor of the incoming (remote) video path: ffmpeg decoder state,
 * RTP reassembly bookkeeping, and the circular queue of N_DEC_IN payload
 * buffers shared between the RTP reader and the display thread (see the
 * big comment above for the cur/dpy/lock protocol). */
232 struct video_in_desc {
233 	struct video_codec_desc *dec; /* decoder */
234 	AVCodecContext *dec_ctx; /* information about the codec in the stream */
235 	AVCodec *codec; /* reference to the codec */
236 	AVFrame *d_frame; /* place to store the decoded frame */
237 	AVCodecParserContext *parser;
238 	uint16_t next_seq; /* must be 16 bit */
239 	int discard; /* flag for discard status */
240 #define N_DEC_IN 3 /* number of incoming buffers */
241 	struct fbuf_t *dec_in_cur; /* buffer being filled in */
242 	struct fbuf_t *dec_in_dpy; /* buffer to display */
243 	ast_mutex_t dec_in_lock;
244 	struct fbuf_t dec_in[N_DEC_IN]; /* incoming bitstream, allocated/extended in fbuf_append() */
245 	struct fbuf_t dec_out; /* decoded frame, no buffer (data is in AVFrame) */
246 	struct fbuf_t rem_dpy; /* display remote image, no buffer (it is in win[WIN_REMOTE].bmp) */
250  * Each codec is defined by a number of callbacks
252 /*! \brief initialize the encoder */
253 typedef int (*encoder_init_f)(struct video_out_desc *v);
255 /*! \brief actually call the encoder */
256 typedef int (*encoder_encode_f)(struct video_out_desc *v);
258 /*! \brief encapsulate the bitstream in RTP frames */
259 typedef struct ast_frame *(*encoder_encap_f)(struct video_out_desc *out,
260 	struct ast_frame **tail);
262 /*! \brief initialize the decoder */
263 typedef int (*decoder_init_f)(struct video_in_desc *v);
265 /*! \brief extract the bitstream from RTP frames and store in the fbuf.
266  * return 0 if ok, 1 on error
268 typedef int (*decoder_decap_f)(struct fbuf_t *b, uint8_t *data, int len);
270 /*! \brief actually call the decoder */
271 typedef int (*decoder_decode_f)(struct video_in_desc *v, struct fbuf_t *b);
/* vtable describing one supported codec; instances are h263p_codec,
 * h263_codec, h261_codec, mpeg4_codec, ... below. */
273 struct video_codec_desc {
274 	const char *name; /* format name */
275 	int format; /* AST_FORMAT_* */
276 	encoder_init_f enc_init;
277 	encoder_encap_f enc_encap;
278 	encoder_encode_f enc_run;
279 	decoder_init_f dec_init;
280 	decoder_decap_f dec_decap;
281 	decoder_decode_f dec_run;
284 /* our representation of a displayed window. SDL can only do one main
285 * window so we map everything within that one
287 enum { WIN_LOCAL, WIN_REMOTE, WIN_KEYPAD, WIN_MAX };
289 struct display_window {
291 SDL_Rect rect; /* loc. of images */
294 #define GUI_BUFFER_LEN 256 /* buffer length used for input buffers */
296 enum kp_type { KP_NONE, KP_RECT, KP_CIRCLE };
297 struct keypad_entry {
298 int c; /* corresponding character */
299 int x0, y0, x1, y1, h; /* arguments */
303 /*! \brief info related to the gui: button status, mouse coords, etc. */
305 char inbuf[GUI_BUFFER_LEN]; /* buffer for to-dial buffer */
306 int inbuf_pos; /* next free position in inbuf */
307 char msgbuf[GUI_BUFFER_LEN]; /* buffer for text-message buffer */
308 int msgbuf_pos; /* next free position in msgbuf */
309 int text_mode; /* switch to-dial and text-message mode */
310 int drag_mode; /* switch phone and drag-source mode */
311 int x_drag; /* x coordinate where the drag starts */
312 int y_drag; /* y coordinate where the drag starts */
314 TTF_Font *font; /* font to be used */
316 int outfd; /* fd for output */
317 SDL_Surface *keypad; /* the pixmap for the keypad */
318 int kp_size, kp_used;
319 struct keypad_entry *kp;
323 * The overall descriptor, with room for config info, video source and
324 * received data descriptors, SDL info, etc.
327 char codec_name[64]; /* the codec we use */
329 pthread_t vthread; /* video thread */
330 int shutdown; /* set to shutdown vthread */
331 struct ast_channel *owner; /* owner channel */
333 struct video_in_desc in; /* remote video descriptor */
334 struct video_out_desc out; /* local video descriptor */
338 /* support for display. */
341 SDL_Surface *screen; /* the main window */
342 char keypad_file[256]; /* image for the keypad */
343 char keypad_mask[256]; /* background for the keypad */
344 char keypad_font[256]; /* font for the keypad */
345 struct display_window win[WIN_MAX];
348 static AVPicture *fill_pict(struct fbuf_t *b, AVPicture *p);
/* Release a frame buffer's storage (only when it owns it, i.e. size > 0),
 * then clear the struct while preserving the format description fields
 * so the buffer can be refilled later. */
350 static void fbuf_free(struct fbuf_t *b)
352 	struct fbuf_t x = *b;
354 	if (b->data && b->size)
356 	bzero(b, sizeof(*b));
357 	/* restore some fields */
360 	b->pix_fmt = x.pix_fmt;
364 * Append a chunk of data to a buffer taking care of bit alignment
365 * Return 0 on success, != 0 on failure
367 static int fbuf_append(struct fbuf_t *b, uint8_t *src, int len,
371 	 * Allocate buffer. ffmpeg wants an extra FF_INPUT_BUFFER_PADDING_SIZE,
372 	 * and also wants 0 as a buffer terminator to prevent trouble.
374 	int need = len + FF_INPUT_BUFFER_PADDING_SIZE;
378 	if (b->data == NULL) {
382 		b->data = ast_calloc(1, b->size);
383 	} else if (b->used + need > b->size) {
384 		b->size = b->used + need;
385 		b->data = ast_realloc(b->data, b->size);
387 	if (b->data == NULL) {
388 		ast_log(LOG_WARNING, "alloc failure for %d, discard\n",
392 	if (b->used == 0 && b->ebit != 0) {
393 		ast_log(LOG_WARNING, "ebit not reset at start\n");
396 	dst = b->data + b->used;
397 	i = b->ebit + sbit;	/* bits to ignore around */
398 	if (i == 0) {	/* easy case, just append */
399 		/* do everything in the common block */
400 	} else if (i == 8) { /* easy too, just handle the overlap byte */
401 		mask = (1 << b->ebit) - 1;
402 		/* update the last byte in the buffer */
403 		dst[-1] &= ~mask;	/* clear bits to ignore */
404 		dst[-1] |= (*src & mask);	/* append new bits */
405 		src += 1;	/* skip and prepare for common block */
		/* NOTE(review): only the i==0 and i==8 alignments are handled;
		 * arbitrary shifts below are still unimplemented. */
407 	} else {	/* must shift the new block, not done yet */
408 		ast_log(LOG_WARNING, "must handle shift %d %d at %d\n",
409 			b->ebit, sbit, b->used);
412 	memcpy(dst, src, len);
415 	b->data[b->used] = 0; /* padding */
420 * Build an ast_frame for a given chunk of data, and link it into
421 * the queue, with possibly 'head' bytes at the beginning to
422 * fill in some fields later.
/* Allocate an AST_FRAME_VIDEO carrying a private copy of [start,end),
 * with 'head' spare bytes in front for the RTP payload header, and link
 * it after 'prev' (if any) in the frame list. */
424 static struct ast_frame *create_video_frame(uint8_t *start, uint8_t *end,
425 	int format, int head, struct ast_frame *prev)
431 	data = ast_calloc(1, len+head);
432 	f = ast_calloc(1, sizeof(*f));
433 	if (f == NULL || data == NULL) {
434 		ast_log(LOG_WARNING, "--- frame error f %p data %p len %d format %d\n",
435 			f, data, len, format);
442 	memcpy(data+head, start, len);
	/* both the header and the payload were malloc'ed, so the core frees them */
444 	f->mallocd = AST_MALLOCD_DATA | AST_MALLOCD_HDR;
445 	//f->has_timing_info = 1;
446 	//f->ts = ast_tvdiff_ms(ast_tvnow(), out->ts);
447 	f->datalen = len+head;
448 	f->frametype = AST_FRAME_VIDEO;
449 	f->subclass = format;
453 	f->delivery.tv_sec = 0;
454 	f->delivery.tv_usec = 0;
456 	AST_LIST_NEXT(f, frame_list) = NULL;
459 		AST_LIST_NEXT(prev, frame_list) = f;
464 /* some debugging code to check the bitstream:
465 * declare a bit buffer, initialize it, and fetch data from it.
469 int bitsize; /* total size in bits */
470 int ofs; /* next bit to read */
/* Minimal MSB-first bit reader used only by the debugging code below. */
473 static struct bitbuf bitbuf_init(const uint8_t *base, int bitsize, int start_ofs)
/* number of unread bits left in the buffer */
482 static int bitbuf_left(struct bitbuf *b)
484 	return b->bitsize - b->ofs;
/* fetch up to 32 bits, MSB first; clamps n at the end of the buffer */
487 static uint32_t getbits(struct bitbuf *b, int n)
494 		ast_log(LOG_WARNING, "too many bits %d, max 32\n", n);
497 	if (n + b->ofs > b->bitsize) {
498 		ast_log(LOG_WARNING, "bitbuf overflow %d of %d\n", n + b->ofs, b->bitsize);
499 		n = b->bitsize - b->ofs;
501 	ofs = 7 - b->ofs % 8;	/* start from msb */
503 	d = b->base + b->ofs / 8;	/* current byte */
504 	for (i=0 ; i < n; i++) {
505 		retval += retval + (*d & mask ? 1 : 0);	/* shift in new byte */
/* Debug-only sanity walk of an H.261 bitstream: verifies the PSC, dumps
 * PTYPE fields, then walks GOB headers. Output goes to the log. */
516 static void check_h261(struct fbuf_t *b)
518 	struct bitbuf a = bitbuf_init(b->data, b->used * 8, 0);
521 	x = getbits(&a, 20);	/* PSC, 0000 0000 0000 0001 0000 */
523 		ast_log(LOG_WARNING, "bad PSC 0x%x\n", x);
526 	x = getbits(&a, 5);	/* temporal reference */
527 	y = getbits(&a, 6);	/* ptype */
529 	ast_log(LOG_WARNING, "size %d TR %d PTY spl %d doc %d freeze %d %sCIF hi %d\n",
535 		(y & 0x4) ? "" : "Q",
	/* skip PSPARE extensions until the continuation bit is 0 */
537 	while ( (x = getbits(&a, 1)) == 1)
538 		ast_log(LOG_WARNING, "PSPARE 0x%x\n", getbits(&a, 8));
539 	// ast_log(LOG_WARNING, "PSPARE 0 - start GOB LAYER\n");
540 	while ( (x = bitbuf_left(&a)) > 0) {
541 		// ast_log(LOG_WARNING, "GBSC %d bits left\n", x);
542 		x = getbits(&a, 16); /* GBSC 0000 0000 0000 0001 */
544 			ast_log(LOG_WARNING, "bad GBSC 0x%x\n", x);
547 		x = getbits(&a, 4);	/* group number */
548 		y = getbits(&a, 5);	/* gquant */
550 			ast_log(LOG_WARNING, " bad GN %d\n", x);
553 		while ( (x = getbits(&a, 1)) == 1)
554 			ast_log(LOG_WARNING, "GSPARE 0x%x\n", getbits(&a, 8));
555 		while ( (x = bitbuf_left(&a)) > 0) {	/* MB layer */
/* Debug helper: hex-dump a frame buffer to the log, 16 bytes per line,
 * eliding the middle (keeps the first 32 bytes and the last two lines). */
561 void dump_buf(struct fbuf_t *b);
562 void dump_buf(struct fbuf_t *b)
564 	int i, x, last2lines;
567 	last2lines = (b->used - 16) & ~0xf;
568 	ast_log(LOG_WARNING, "buf size %d of %d\n", b->used, b->size);
569 	for (i = 0; i < b->used; i++) {
571 		if ( x == 0) {	/* new line */
573 				ast_log(LOG_WARNING, "%s\n", buf);
574 			bzero(buf, sizeof(buf));
575 			sprintf(buf, "%04x: ", i);
577 		sprintf(buf + 6 + x*3, "%02x ", b->data[i]);
578 		if (i > 31 && i < last2lines)
582 		ast_log(LOG_WARNING, "%s\n", buf);
585 * Here starts the glue code for the various supported video codecs.
586 * For each of them, we need to provide routines for initialization,
587 * calling the encoder, encapsulating the bitstream in ast_frames,
588 * extracting payload from ast_frames, and calling the decoder.
591 /*--- h263+ support --- */
593 /*! \brief initialization of h263p */
/* Configure the ffmpeg encoder context for H.263+ by turning on the
 * optional annexes listed below, and emit an I frame every 5 seconds. */
594 static int h263p_enc_init(struct video_out_desc *v)
596 	/* modes supported are
597 	- Unrestricted Motion Vector (annex D)
598 	- Advanced Prediction (annex F)
599 	- Advanced Intra Coding (annex I)
600 	- Deblocking Filter (annex J)
601 	- Slice Structure (annex K)
602 	- Alternative Inter VLC (annex S)
603 	- Modified Quantization (annex T)
605 	v->enc_ctx->flags |=CODEC_FLAG_H263P_UMV; /* annex D */
606 	v->enc_ctx->flags |=CODEC_FLAG_AC_PRED; /* annex f ? (NOTE(review): mapping to annex F unconfirmed) */
607 	v->enc_ctx->flags |=CODEC_FLAG_H263P_SLICE_STRUCT; /* annex k */
608 	v->enc_ctx->flags |= CODEC_FLAG_H263P_AIC; /* annex I */
610 	v->enc_ctx->gop_size = v->fps*5; // emit I frame every 5 seconds
616 * Create RTP/H.263 fragments to avoid IP fragmentation. We fragment on a
617 * PSC or a GBSC, but if we don't find a suitable place just break somewhere.
618 * Everything is byte-aligned.
/* Split the encoder output into MTU-sized RTP/H.263+ (RFC 2429) payloads,
 * preferring to cut on a PSC/GBSC boundary; the last fragment gets the
 * RTP marker bit. Returns the head of the frame list, *tail the last. */
620 static struct ast_frame *h263p_encap(struct video_out_desc *out,
621 	struct ast_frame **tail)
623 	struct ast_frame *cur = NULL, *first = NULL;
624 	uint8_t *d = out->enc_out.data;
625 	int len = out->enc_out.used;
626 	int l = len; /* size of the current fragment. If 0, must look for a psc */
628 	for (;len > 0; len -= l, d += l) {
633 		if (len >= 3 && d[0] == 0 && d[1] == 0 && d[2] >= 0x80) {
634 			/* we are starting a new block, so look for a PSC. */
635 			for (i = 3; i < len - 3; i++) {
636 				if (d[i] == 0 && d[i+1] == 0 && d[i+2] >= 0x80) {
642 		if (l > out->mtu || l > len) { /* psc not found, split */
643 			l = MIN(len, out->mtu);
645 		if (l < 1 || l > out->mtu) {
646 			ast_log(LOG_WARNING, "--- frame error l %d\n", l);
650 		if (d[0] == 0 && d[1] == 0) { /* we start with a psc */
652 		} else { /* no psc, create a header */
656 		f = create_video_frame(d, d+l, AST_FORMAT_H263_PLUS, h, cur);
661 		if (h == 0) { /* we start with a psc */
662 			data[0] |= 0x04;	// set P == 1, and we are done
663 		} else { /* no psc, create a header */
664 			data[0] = data[1] = 0;	// P == 0
673 		cur->subclass |= 1; // RTP Marker
675 	*tail = cur;	/* end of the list */
679 /*! \brief extract the bitstream from the RTP payload.
680 * This is format dependent.
681 * For h263+, the format is defined in RFC 2429
682 * and basically has a fixed 2-byte header as follows:
683 * 5 bits RR reserved, shall be 0
684 * 1 bit P indicate a start/end condition,
685 * in which case the payload should be prepended
686 * by two zero-valued bytes.
687 * 1 bit V there is an additional VRC header after this header
688 * 6 bits PLEN length in bytes of extra picture header
689 * 3 bits PEBIT how many bits to be ignored in the last byte
691 * XXX the code below is not complete.
/* Strip the 2-byte RFC 2429 payload header (see comment above) and append
 * the remaining H.263+ bitstream to b; a set P bit is rewritten as the
 * two zero bytes of a picture/GOB start code. */
693 static int h263p_decap(struct fbuf_t *b, uint8_t *data, int len)
698 		ast_log(LOG_WARNING, "invalid framesize %d\n", len);
701 	PLEN = ( (data[0] & 1) << 5 ) | ( (data[1] & 0xf8) >> 3);
707 	if (data[0] & 4)	/* bit P */
708 		data[0] = data[1] = 0;
713 	return fbuf_append(b, data, len, 0, 0);	/* ignore trail bits */
718 * generic encoder, used by the various protocols supported here.
719 * We assume that the buffer is empty at the beginning.
/* Run the ffmpeg encoder on v->frame into enc_out, then poll once more
 * with a NULL picture to flush any delayed frames. */
721 static int ffmpeg_encode(struct video_out_desc *v)
723 	struct fbuf_t *b = &v->enc_out;
726 	b->used = avcodec_encode_video(v->enc_ctx, b->data, b->size, v->frame);
727 	i = avcodec_encode_video(v->enc_ctx, b->data + b->used, b->size - b->used, NULL); /* delayed frames ? */
729 		ast_log(LOG_WARNING, "have %d more bytes\n", i);
736 * Generic decoder, which is used by h263p, h263 and h261 as it simply
737 * invokes ffmpeg's decoder.
738 * av_parser_parse should merge a randomly chopped up stream into
739 * proper frames. After that, if we have a valid frame, we decode it
740 * until the entire frame is processed.
742 static int ffmpeg_decode(struct video_in_desc *v, struct fbuf_t *b)
744 	uint8_t *src = b->data;
745 	int srclen = b->used;
748 	if (srclen == 0)	/* no data */
752 	// ast_log(LOG_WARNING, "rx size %d\n", srclen);
		/* let the parser cut the stream into decodable chunks */
756 		int len = av_parser_parse(v->parser, v->dec_ctx, &data, &datalen, src, srclen, 0, 0);
760 		/* The parser might return something it cannot decode, so it skips
761 		 * the block returning no data
763 		if (data == NULL || datalen == 0)
765 		ret = avcodec_decode_video(v->dec_ctx, v->d_frame, &full_frame, data, datalen);
766 		if (full_frame == 1)	/* full frame */
769 			ast_log(LOG_NOTICE, "Error decoding\n");
	/* keep unconsumed bytes for the next call (regions may overlap) */
773 	if (srclen != 0)	/* update b with leftover data */
774 		bcopy(src, b->data, srclen);
/* codec vtable for H.263+ (encap/decap per RFC 2429, generic ffmpeg core) */
780 static struct video_codec_desc h263p_codec = {
782 	.format = AST_FORMAT_H263_PLUS,
783 	.enc_init = h263p_enc_init,
784 	.enc_encap = h263p_encap,
785 	.enc_run = ffmpeg_encode,
787 	.dec_decap = h263p_decap,
788 	.dec_run = ffmpeg_decode
791 /*--- Plain h263 support --------*/
/* Configure the ffmpeg encoder context for plain H.263; same annex flags
 * as the H.263+ variant (support not verified, per the XXX below). */
793 static int h263_enc_init(struct video_out_desc *v)
795 	/* XXX check whether these are supported */
796 	v->enc_ctx->flags |= CODEC_FLAG_H263P_UMV;
797 	v->enc_ctx->flags |= CODEC_FLAG_H263P_AIC;
798 	v->enc_ctx->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
799 	v->enc_ctx->flags |= CODEC_FLAG_AC_PRED;
801 	v->enc_ctx->gop_size = v->fps*5;
807 * h263 encapsulation is specified in RFC2190. There are three modes
808 * defined (A, B, C), with 4, 8 and 12 bytes of header, respectively.
809 * The header is made as follows
810 * 0.....................|.......................|.............|....31
811 * F:1 P:1 SBIT:3 EBIT:3 SRC:3 I:1 U:1 S:1 A:1 R:4 DBQ:2 TRB:3 TR:8
812 * FP = 0- mode A, (only one word of header)
813 * FP = 10 mode B, and also means this is an I or P frame
814 * FP = 11 mode C, and also means this is a PB frame.
815 * SBIT, EBIT number of bits to ignore at beginning (msbits) and end (lsbits)
816 * SRC bits 6,7,8 from the h263 PTYPE field
817 * I = 0 intra-coded, 1 = inter-coded (bit 9 from PTYPE)
818 * U = 1 for Unrestricted Motion Vector (bit 10 from PTYPE)
819 * S = 1 for Syntax Based Arith coding (bit 11 from PTYPE)
820 * A = 1 for Advanced Prediction (bit 12 from PTYPE)
821 * R = reserved, must be 0
822 * DBQ = differential quantization, DBQUANT from h263, 0 unless we are using
824 * TRB = temporal reference for bframes, also 0 unless this is a PB frame
825 * TR = temporal reference for P frames, also 0 unless PB frame.
827 * Mode B and mode C description omitted.
829 * An RTP frame can start with a PSC 0000 0000 0000 0000 1000 0
830 * or with a GBSC, which also has the first 17 bits as a PSC.
831 * Note - PSC are byte-aligned, GOB not necessarily. PSC start with
832 * PSC:22 0000 0000 0000 0000 1000 00 picture start code
833 * TR:8 .... .... temporal reference
834 * PTYPE:13 or more ptype...
835 * If we don't fragment a GOB SBIT and EBIT = 0.
838 * The assumption below is that we start with a PSC.
/* Packetize the H.263 bitstream per RFC 2190 Mode A (4-byte header):
 * scan for PSC/GBSC markers (17 leading zero bits + 1) to pick fragment
 * boundaries; fragments are not split even when larger than the MTU. */
840 static struct ast_frame *h263_encap(struct video_out_desc *out,
841 	struct ast_frame **tail)
843 	uint8_t *d = out->enc_out.data;
844 	int start = 0, i, len = out->enc_out.used;
845 	struct ast_frame *f, *cur = NULL, *first = NULL;
846 	const int pheader_len = 4;	/* Use RFC-2190 Mode A */
847 	uint8_t h263_hdr[12];	/* worst case, room for a type c header */
848 	uint8_t *h = h263_hdr;	/* shorthand */
850 #define H263_MIN_LEN	6
851 	if (len < H263_MIN_LEN)	/* unreasonably small */
854 	bzero(h263_hdr, sizeof(h263_hdr));
855 	/* Now set the header bytes. Only type A by now,
856 	 * and h[0] = h[2] = h[3] = 0 by default.
857 	 * PTYPE starts 30 bits in the picture, so the first useful
858 	 * bit for us is bit 36 i.e. within d[4] (0 is the msbit).
859 	 * SRC = d[4] & 0x1c goes into data[1] & 0xe0
860 	 * I   = d[4] & 0x02 goes into data[1] & 0x10
861 	 * U   = d[4] & 0x01 goes into data[1] & 0x08
862 	 * S   = d[5] & 0x80 goes into data[1] & 0x04
863 	 * A   = d[5] & 0x40 goes into data[1] & 0x02
864 	 * R   = 0           goes into data[1] & 0x01
865 	 * Optimizing it, we have
867 	h[1] = ( (d[4] & 0x1f) << 3 ) |	/* SRC, I, U */
868 		( (d[5] & 0xc0) >> 5 );		/* S, A, R */
870 	/* now look for the next PSC or GOB header. First try to hit
871 	 * a '0' byte then look around for the 0000 0000 0000 0000 1 pattern
872 	 * which is both in the PSC and the GBSC.
874 	for (i = H263_MIN_LEN, start = 0; start < len; start = i, i += 3) {
875 		//ast_log(LOG_WARNING, "search at %d of %d/%d\n", i, start, len);
876 		for (; i < len ; i++) {
877 			uint8_t x, rpos, lpos;
878 			int rpos_i;	/* index corresponding to rpos */
879 			if (d[i] != 0)	/* cannot be in a GBSC */
884 			if (x == 0)	/* next is equally good */
886 			/* see if around us we can make 16 '0' bits for the GBSC.
887 			 * Look for the first bit set on the right, and then
888 			 * see if we have enough 0 on the left.
889 			 * We are guaranteed to end before rpos == 0
891 			for (rpos = 0x80, rpos_i = 8; rpos; rpos >>= 1, rpos_i--)
892 				if (x & rpos)	/* found the '1' bit in GBSC */
894 			x = d[i-1];		/* now look behind */
895 			for (lpos = rpos; lpos ; lpos >>= 1)
896 				if (x & lpos)	/* too early, not a GBSC */
898 			if (lpos)	/* as i said... */
900 			/* now we have a GBSC starting somewhere in d[i-1],
901 			 * but it might be not byte-aligned
903 			if (rpos == 0x80) {	/* lucky case */
905 			} else {	/* XXX to be completed */
906 				ast_log(LOG_WARNING, "unaligned GBSC 0x%x %d\n",
911 		/* This frame is up to offset i (not inclusive).
912 		 * We do not split it yet even if larger than MTU.
914 		f = create_video_frame(d + start, d+i, AST_FORMAT_H263,
919 		bcopy(h, f->data, 4);	/* copy the h263 header */
920 		/* XXX to do: if not aligned, fix sbit and ebit,
921 		 * then move i back by 1 for the next frame
929 		cur->subclass |= 1;	// RTP Marker
935 /* XXX We only drop the header here, but maybe we need more. */
935 /* XXX We only drop the header here, but maybe we need more. */
/* Strip the RFC 2190 payload header (Mode A only; B/C are rejected)
 * and append the bitstream to b, byte-aligned. */
936 static int h263_decap(struct fbuf_t *b, uint8_t *data, int len)
939 		ast_log(LOG_WARNING, "invalid framesize %d\n", len);
940 		return 1;	/* error */
943 	if ( (data[0] & 0x80) == 0) {
947 		ast_log(LOG_WARNING, "unsupported mode 0x%x\n",
951 	return fbuf_append(b, data, len, 0, 0);	/* XXX no bit alignment support yet */
/* codec vtable for plain H.263 (RFC 2190 encap, generic ffmpeg core) */
954 static struct video_codec_desc h263_codec = {
956 	.format = AST_FORMAT_H263,
957 	.enc_init = h263_enc_init,
958 	.enc_encap = h263_encap,
959 	.enc_run = ffmpeg_encode,
961 	.dec_decap = h263_decap,
962 	.dec_run = ffmpeg_decode
966 /*---- h261 support -----*/
966 /*---- h261 support -----*/
967 static int h261_enc_init(struct video_out_desc *v)
969 	/* It is important to set rtp_payload_size = 0, otherwise
970 	 * ffmpeg in h261 mode will produce output that it cannot parse.
971 	 * Also try to send I frames more frequently than with other codecs.
973 	v->enc_ctx->rtp_payload_size = 0; /* important - ffmpeg fails otherwise */
974 	v->enc_ctx->gop_size = v->fps*2;	/* be more responsive */
980 * The encapsulation of H261 is defined in RFC4587 which obsoletes RFC2032
981 * The bitstream is preceded by a 32-bit header word:
982 * SBIT:3 EBIT:3 I:1 V:1 GOBN:4 MBAP:5 QUANT:5 HMVD:5 VMVD:5
983 * SBIT and EBIT are the bits to be ignored at beginning and end,
984 * I=1 if the stream has only INTRA frames - cannot change during the stream.
985 * V=0 if motion vector is not used. Cannot change.
986 * GOBN is the GOB number in effect at the start of packet, 0 if we
987 * start with a GOB header
988 * QUANT is the quantizer in effect, 0 if we start with GOB header
989 * HMVD reference horizontal motion vector. 10000 is forbidden
990 * VMVD reference vertical motion vector, as above.
991 * Packetization should occur at GOB boundaries, and if not possible
992 * with MacroBlock fragmentation. However it is likely that blocks
993 * are not bit-aligned so we must take care of this.
/* Packetize the H.261 bitstream per RFC 4587 (4-byte header): prefer GOB
 * (GBSC) boundaries within the MTU; fragments may end mid-byte, in which
 * case ebit/sbit record the partial-byte overlap between fragments. */
995 static struct ast_frame *h261_encap(struct video_out_desc *out,
996 	struct ast_frame **tail)
998 	uint8_t *d = out->enc_out.data;
999 	int start = 0, i, len = out->enc_out.used;
1000 	struct ast_frame *f, *cur = NULL, *first = NULL;
1001 	const int pheader_len = 4;
1002 	uint8_t h261_hdr[4];
1003 	uint8_t *h = h261_hdr;	/* shorthand */
1004 	int sbit = 0, ebit = 0;
1006 #define H261_MIN_LEN 10
1007 	if (len < H261_MIN_LEN)	/* unreasonably small */
1010 	bzero(h261_hdr, sizeof(h261_hdr));
1012 	/* Similar to the code in h263_encap, but the marker there is longer.
1013 	 * Start a few bytes within the bitstream to avoid hitting the marker
1014 	 * twice. Note we might access the buffer at len, but this is ok because
1015 	 * the caller has it oversized.
1017 	for (i = H261_MIN_LEN, start = 0; start < len - 1; start = i, i += 4) {
1018 #if 0	/* test - disable packetization */
1019 		i = len;	/* wrong... */
1021 		int found = 0, found_ebit = 0;	/* last GBSC position found */
1022 		for (; i < len ; i++) {
1023 			uint8_t x, rpos, lpos;
1024 			if (d[i] != 0)	/* cannot be in a GBSC */
1027 			if (x == 0)	/* next is equally good */
1029 			/* See if around us we find 15 '0' bits for the GBSC.
1030 			 * Look for the first bit set on the right, and then
1031 			 * see if we have enough 0 on the left.
1032 			 * We are guaranteed to end before rpos == 0
1034 			for (rpos = 0x80, ebit = 7; rpos; ebit--, rpos >>= 1)
1035 				if (x & rpos)	/* found the '1' bit in GBSC */
1037 			x = d[i-1];		/* now look behind */
1038 			for (lpos = (rpos >> 1); lpos ; lpos >>= 1)
1039 				if (x & lpos)	/* too early, not a GBSC */
1041 			if (lpos)	/* as i said... */
1043 			/* now we have a GBSC starting somewhere in d[i-1],
1044 			 * but it might be not byte-aligned. Just remember it.
1046 			if (i - start > out->mtu)	/* too large, stop now */
1050 			i += 4;	/* continue forward */
1052 		if (i >= len) {	/* trim if we went too forward */
1054 			ebit = 0;	/* hopefully... should ask the bitstream ? */
1056 		if (i - start > out->mtu && found) {
1057 			/* use the previous GBSC, hope is within the mtu */
1062 		if (i - start < 4)	/* XXX too short ? */
1064 		/* This frame is up to offset i (not inclusive).
1065 		 * We do not split it yet even if larger than MTU.
1067 		f = create_video_frame(d + start, d+i, AST_FORMAT_H261,
1072 		/* recompute header with I=0, V=1 */
1073 		h[0] = ( (sbit & 7) << 5 ) | ( (ebit & 7) << 2 ) | 1;
1074 		bcopy(h, f->data, 4);	/* copy the h261 header */
1075 		if (ebit)	/* not aligned, restart from previous byte */
1077 		sbit = (8 - ebit) & 7;
1084 		cur->subclass |= 1;	// RTP Marker
1091 * Pieces might be unaligned so we really need to put them together.
/* Strip the 4-byte RFC 4587 header, recover SBIT/EBIT, and let
 * fbuf_append() stitch the (possibly bit-unaligned) pieces together. */
1093 static int h261_decap(struct fbuf_t *b, uint8_t *data, int len)
1098 		ast_log(LOG_WARNING, "invalid framesize %d\n", len);
1101 	sbit = (data[0] >> 5) & 7;
1102 	ebit = (data[0] >> 2) & 7;
1105 	return fbuf_append(b, data, len, sbit, ebit);
/* codec vtable for H.261 (RFC 4587 encap, generic ffmpeg core) */
1108 static struct video_codec_desc h261_codec = {
1110 	.format = AST_FORMAT_H261,
1111 	.enc_init = h261_enc_init,
1112 	.enc_encap = h261_encap,
1113 	.enc_run = ffmpeg_encode,
1115 	.dec_decap = h261_decap,
1116 	.dec_run = ffmpeg_decode
/* Configure the ffmpeg encoder context for MPEG-4 part 2 video. */
1120 static int mpeg4_enc_init(struct video_out_desc *v)
1123 	//v->enc_ctx->flags |= CODEC_FLAG_LOW_DELAY; /*don't use b frames ?*/
1124 	v->enc_ctx->flags |= CODEC_FLAG_AC_PRED;
1125 	v->enc_ctx->flags |= CODEC_FLAG_H263P_UMV;
1126 	v->enc_ctx->flags |= CODEC_FLAG_QPEL;
1127 	v->enc_ctx->flags |= CODEC_FLAG_4MV;
1128 	v->enc_ctx->flags |= CODEC_FLAG_GMC;
1129 	v->enc_ctx->flags |= CODEC_FLAG_LOOP_FILTER;
1130 	v->enc_ctx->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
1132 	v->enc_ctx->gop_size = v->fps*5;
1133 	v->enc_ctx->rtp_payload_size = 0; /* important - ffmpeg fails otherwise */
1137 /* simplistic encapsulation - just split frames in mtu-size units */
/*! \brief cut the encoded MPEG-4 bitstream into MTU-sized ast_frames.
 * Frames are chained via create_video_frame(); *tail receives the last
 * one so the caller can append the list to the channel's read queue. */
1138 static struct ast_frame *mpeg4_encap(struct video_out_desc *out,
1139 struct ast_frame **tail)
1141 struct ast_frame *f, *cur = NULL, *first = NULL;
1142 uint8_t *d = out->enc_out.data;
1143 uint8_t *end = d+out->enc_out.used;
/* walk the buffer in MTU-sized slices; 'cur' links each new frame */
1146 for (;d < end; d += len, cur = f) {
1147 len = MIN(out->mtu, end-d);
1148 f = create_video_frame(d, d+len, AST_FORMAT_MP4_VIDEO, 0, cur);
/*! \brief MPEG-4 depacketizer: payload is byte-aligned, so just append
 * the whole packet to the reassembly buffer (sbit = ebit = 0). */
1160 static int mpeg4_decap(struct fbuf_t *b, uint8_t *data, int len)
1162 return fbuf_append(b, data, len, 0, 0);
/*! \brief decode a reassembled MPEG-4 frame with ffmpeg.
 * On decode error we pretend the whole buffer was consumed (ret =
 * datalen) so we resynchronize instead of looping on bad data; any
 * leftover bytes are shifted to the front of the buffer for next time. */
1165 static int mpeg4_decode(struct video_in_desc *v, struct fbuf_t *b)
1167 int full_frame = 0, datalen = b->used;
1168 int ret = avcodec_decode_video(v->dec_ctx, v->d_frame, &full_frame,
1171 ast_log(LOG_NOTICE, "Error decoding\n");
1172 ret = datalen; /* assume we used everything. */
1175 if (datalen > 0) /* update b with leftover bytes */
1176 bcopy(b->data + ret, b->data, datalen);
/* Codec descriptor for MPEG-4: shared ffmpeg encoder, but a private
 * decode routine (mpeg4_decode) instead of the generic ffmpeg_decode. */
1182 static struct video_codec_desc mpeg4_codec = {
1184 .format = AST_FORMAT_MP4_VIDEO,
1185 .enc_init = mpeg4_enc_init,
1186 .enc_encap = mpeg4_encap,
1187 .enc_run = ffmpeg_encode,
1189 .dec_decap = mpeg4_decap,
1190 .dec_run = mpeg4_decode
/*! \brief per-codec encoder setup for H.264: disable ffmpeg's RTP
 * packetization (we fragment ourselves in h264_encap) and allow the
 * full bitrate as tolerance. */
1193 static int h264_enc_init(struct video_out_desc *v)
1195 v->enc_ctx->flags |= CODEC_FLAG_TRUNCATED;
1196 //v->enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
1197 //v->enc_ctx->flags2 |= CODEC_FLAG2_FASTPSKIP;
1198 /* TODO: Maybe we need to add some other flags */
1199 v->enc_ctx->gop_size = v->fps*5; // emit I frame every 5 seconds
1200 v->enc_ctx->rtp_mode = 0;
1201 v->enc_ctx->rtp_payload_size = 0;
1202 v->enc_ctx->bit_rate_tolerance = v->enc_ctx->bit_rate;
/*! \brief per-codec decoder setup for H.264: TRUNCATED lets ffmpeg
 * accept frames that are not complete at packet boundaries. */
1206 static int h264_dec_init(struct video_in_desc *v)
1208 v->dec_ctx->flags |= CODEC_FLAG_TRUNCATED;
1214 * The structure of a generic H.264 stream is:
1215 * - 0..n 0-byte(s), unused, optional. one zero-byte is always present
1216 * in the first NAL before the start code prefix.
1217 * - start code prefix (3 bytes): 0x000001
1218 * (the first NAL unit in the bytestream may carry a longer,
1219 * 4-byte start code such as 0x00000001!)
1220 * - NAL header byte ( F[1] | NRI[2] | Type[5] ) where type != 0
1222 * - 0..n 0-byte(s) (padding, unused).
1223 * Segmentation in RTP only needs to be done on start code prefixes.
1224 * If fragments are too long... we don't support it yet.
1225 * - encapsulate (or fragment) the byte-stream (with NAL header included)
/*! \brief H.264 packetizer (RFC 3984 style): scan the Annex-B byte
 * stream for start-code prefixes, emit each NAL unit as a single RTP
 * frame if it fits in the MTU, otherwise split it into FU-A fragments
 * with a 2-byte add-on header. Returns the head of the frame list,
 * *tail gets the last frame (which carries the RTP marker). */
1227 static struct ast_frame *h264_encap(struct video_out_desc *out,
1228 struct ast_frame **tail)
1230 struct ast_frame *f = NULL, *cur = NULL, *first = NULL;
1231 uint8_t *d, *start = out->enc_out.data;
1232 uint8_t *end = start + out->enc_out.used;
1234 /* Search the first start code prefix - ITU-T H.264 sec. B.2,
1235 * and move start right after that, on the NAL header byte.
/* x must point at least 4 bytes into the buffer for this test */
1237 #define HAVE_NAL(x) (x[-4] == 0 && x[-3] == 0 && x[-2] == 0 && x[-1] == 1)
1238 for (start += 4; start < end; start++) {
1239 int ty = start[0] & 0x1f;
/* types 0 and 31 are invalid NAL unit types, keep scanning */
1240 if (HAVE_NAL(start) && ty != 0 && ty != 31)
1243 /* if not found, or too short, we just skip the next loop and are done. */
1245 /* Here follows the main loop to create frames. Search subsequent start
1246 * codes, and then possibly fragment the unit into smaller fragments.
1248 for (;start < end - 4; start = d) {
1249 int size; /* size of current block */
1250 uint8_t hdr[2]; /* add-on header when fragmenting */
1253 /* now search next nal */
1254 for (d = start + 4; d < end; d++) {
1257 break; /* found NAL */
1259 /* have a block to send. d past the start code unless we overflow */
1260 if (d >= end) { /* NAL not found */
1262 } else if (ty == 0 || ty == 31) { /* found but invalid type, skip */
1263 ast_log(LOG_WARNING, "skip invalid nal type %d at %d of %d\n",
1264 ty, d - out->enc_out.data, out->enc_out.used);
1268 size = d - start - 4; /* don't count the end */
1270 if (size < out->mtu) { // test - don't fragment
/* single NAL unit packet: ship [start, d-4) as-is */
1272 f = create_video_frame(start, d - 4, AST_FORMAT_H264, 0, cur);
1282 // Fragmented Unit (Mode A: no DON, very weak)
1283 hdr[0] = (*start & 0xe0) | 28; /* mark as a fragmentation unit */
1284 hdr[1] = (*start++ & 0x1f) | 0x80 ; /* keep type and set START bit */
1285 size--; /* skip the NAL header */
1288 int frag_size = MIN(size, out->mtu);
/* head=2 reserves room for the FU indicator/header bytes */
1290 f = create_video_frame(start, start+frag_size, AST_FORMAT_H264, 2, cur);
1293 size -= frag_size; /* skip this data block */
1298 data[1] = hdr[1] | (size == 0 ? 0x40 : 0); /* end bit if we are done */
1299 hdr[1] &= ~0x80; /* clear start bit for subsequent frames */
1307 cur->subclass |= 1; // RTP Marker
/*! \brief H.264 depacketizer (RFC 3984): rebuild an Annex-B byte
 * stream by re-inserting a 4-byte start code before each NAL unit.
 * Handles single NAL unit packets (types 1..23) and FU-A fragments
 * (type 28); aggregation packets (STAP/MTAP) are rejected. */
1314 static int h264_decap(struct fbuf_t *b, uint8_t *data, int len)
1316 /* Start Code Prefix (Annex B in specification) */
1317 uint8_t scp[] = { 0x00, 0x00, 0x00, 0x01 };
1322 ast_log(LOG_WARNING, "--- invalid len %d\n", len);
1325 /* first of all, check if the packet has F == 0 */
1326 if (data[0] & 0x80) {
1327 ast_log(LOG_WARNING, "--- forbidden packet; nal: %02x\n",
1332 type = data[0] & 0x1f;
1336 ast_log(LOG_WARNING, "--- invalid type: %d\n", type);
1343 ast_log(LOG_WARNING, "--- encapsulation not supported : %d\n", type);
1345 case 28: /* FU-A Unit */
1346 if (data[1] & 0x80) { // S == 1, import F and NRI from next
/* first fragment: rebuild the NAL header from FU indicator + FU header */
1347 data[1] &= 0x1f; /* preserve type */
1348 data[1] |= (data[0] & 0xe0); /* import F & NRI */
1349 retval = fbuf_append(b, scp, sizeof(scp), 0, 0);
1355 default: /* From 1 to 23 (Single NAL Unit) */
1356 retval = fbuf_append(b, scp, sizeof(scp), 0, 0);
/* ofs skips the encapsulation header bytes consumed above */
1359 retval = fbuf_append(b, data + ofs, len - ofs, 0, 0);
1361 ast_log(LOG_WARNING, "result %d\n", retval);
/* Codec descriptor for H.264: custom encap/decap plus a decoder init
 * hook (the only codec here that needs one). */
1365 static struct video_codec_desc h264_codec = {
1367 .format = AST_FORMAT_H264,
1368 .enc_init = h264_enc_init,
1369 .enc_encap = h264_encap,
1370 .enc_run = ffmpeg_encode,
1371 .dec_init = h264_dec_init,
1372 .dec_decap = h264_decap,
1373 .dec_run = ffmpeg_decode
1376 /*------ end codec specific code -----*/
1379 /* Video4Linux stuff is only used in video_open() */
1380 #ifdef HAVE_VIDEODEV_H
1381 #include <linux/videodev.h>
1385 * Open the local video source and allocate a buffer
1386 * for storing the image. Return 0 on success, -1 on error
/* Two sources are supported: the X11 root window (videodevice == "X11")
 * grabbed via XGetImage, or a Video4Linux camera device. On any error
 * all partially-acquired resources are released before returning. */
1388 static int video_open(struct video_out_desc *v)
1390 struct fbuf_t *b = &v->loc_src;
1391 if (b->data) /* buffer allocated means device already open */
1395 * if the device is "X11", then open the x11 grabber
1397 if (!strcasecmp(v->videodevice, "X11")) {
1401 /* init the connection with the X server */
1402 v->dpy = XOpenDisplay(NULL);
1403 if (v->dpy == NULL) {
1404 ast_log(LOG_WARNING, "error opening display\n");
1408 /* find width and height of the screen */
1409 screen_num = DefaultScreen(v->dpy);
1410 v->screen_width = DisplayWidth(v->dpy, screen_num);
1411 v->screen_height = DisplayHeight(v->dpy, screen_num);
1413 v->image = im = XGetImage(v->dpy,
1414 RootWindow(v->dpy, DefaultScreen(v->dpy)),
1415 b->x, b->y, b->w, b->h, AllPlanes, ZPixmap);
1416 if (v->image == NULL) {
1417 ast_log(LOG_WARNING, "error creating Ximage\n");
/* derive the ffmpeg pixel format from the XImage depth/masks */
1420 switch (im->bits_per_pixel) {
1422 b->pix_fmt = PIX_FMT_RGBA32;
1425 b->pix_fmt = (im->green_mask == 0x7e0) ? PIX_FMT_RGB565 : PIX_FMT_RGB555;
1429 ast_log(LOG_NOTICE, "image: data %p %d bpp fmt %d, mask 0x%lx 0x%lx 0x%lx\n",
1433 im->red_mask, im->green_mask, im->blue_mask);
1435 /* set the pointer but not the size as this is not malloc'ed */
1436 b->data = (uint8_t *)im->data;
1439 #ifdef HAVE_VIDEODEV_H
/* --- Video4Linux (v1) camera path --- */
1442 struct video_window vw = { 0 }; /* camera attributes */
1443 struct video_picture vp;
1445 const char *dev = v->videodevice;
1447 v->fd = open(dev, O_RDONLY | O_NONBLOCK);
1449 ast_log(LOG_WARNING, "error opening camera %s\n", v->videodevice);
1453 i = fcntl(v->fd, F_GETFL);
1454 if (-1 == fcntl(v->fd, F_SETFL, i | O_NONBLOCK)) {
1455 /* non fatal, just emit a warning */
1456 ast_log(LOG_WARNING, "error F_SETFL for %s [%s]\n",
1457 dev, strerror(errno));
1459 /* set format for the camera.
1460 * In principle we could retry with a different format if the
1461 * one we are asking for is not supported.
1463 vw.width = v->loc_src.w;
1464 vw.height = v->loc_src.h;
/* NOTE(review): frame rate appears to be packed in the high 16 bits
 * of vw.flags - confirm against the V4L1 driver in use */
1465 vw.flags = v->fps << 16;
1466 if (ioctl(v->fd, VIDIOCSWIN, &vw) == -1) {
1467 ast_log(LOG_WARNING, "error setting format for %s [%s]\n",
1468 dev, strerror(errno));
1471 if (ioctl(v->fd, VIDIOCGPICT, &vp) == -1) {
1472 ast_log(LOG_WARNING, "error reading picture info\n");
1475 ast_log(LOG_WARNING,
1476 "contrast %d bright %d colour %d hue %d white %d palette %d\n",
1477 vp.contrast, vp.brightness,
1479 vp.whiteness, vp.palette);
1480 /* set the video format. Here again, we don't necessary have to
1481 * fail if the required format is not supported, but try to use
1482 * what the camera gives us.
1484 b->pix_fmt = vp.palette;
1485 vp.palette = VIDEO_PALETTE_YUV420P;
1486 if (ioctl(v->fd, VIDIOCSPICT, &vp) == -1) {
1487 ast_log(LOG_WARNING, "error setting palette, using %d\n",
1490 b->pix_fmt = vp.palette;
1491 /* allocate the source buffer.
1492 * XXX, the code here only handles yuv411, for other formats
1493 * we need to look at pix_fmt and set size accordingly
1495 b->size = (b->w * b->h * 3)/2; /* yuv411 */
1496 ast_log(LOG_WARNING, "videodev %s opened, size %dx%d %d\n",
1497 dev, b->w, b->h, b->size);
1498 v->loc_src.data = ast_calloc(1, b->size);
1500 ast_log(LOG_WARNING, "error allocating buffer %d bytes\n",
1504 ast_log(LOG_WARNING, "success opening camera\n");
1506 #endif /* HAVE_VIDEODEV_H */
/* neither source opened: clean up and fail */
1508 if (v->image == NULL && v->fd < 0)
1514 ast_log(LOG_WARNING, "fd %d dpy %p img %p data %p\n",
1515 v->fd, v->dpy, v->image, v->loc_src.data);
1516 /* XXX maybe XDestroy (v->image) ? */
1518 XCloseDisplay(v->dpy);
1523 fbuf_free(&v->loc_src);
1527 /*! \brief complete a buffer from the local video source.
1528 * Called by get_video_frames(), in turn called by the video thread.
/* Rate-limits capture to v->fps using last_frame timestamps; returns 0
 * when no complete frame is available yet, otherwise the frame size.
 * For X11 it regrabs the screen region; for the camera it accumulates
 * partial non-blocking read()s into loc_src until the buffer is full. */
1530 static int video_read(struct video_out_desc *v)
1532 struct timeval now = ast_tvnow();
1533 struct fbuf_t *b = &v->loc_src;
1535 if (b->data == NULL) /* not initialized */
1538 /* check if it is time to read */
1539 if (ast_tvzero(v->last_frame))
1540 v->last_frame = now;
1541 if (ast_tvdiff_ms(now, v->last_frame) < 1000/v->fps)
1542 return 0; /* too early */
1543 v->last_frame = now; /* XXX actually, should correct for drift */
1547 /* read frame from X11 */
1549 XGetSubImage(v->dpy,
1550 RootWindow(v->dpy, DefaultScreen(v->dpy)),
1551 b->x, b->y, b->w, b->h, AllPlanes, ZPixmap, v->image, 0, 0);
1553 b->data = (uint8_t *)v->image->data;
1555 return p.linesize[0] * b->h;
1558 if (v->fd < 0) /* no other source */
/* camera: append whatever is available right now */
1561 int r, l = v->loc_src.size - v->loc_src.used;
1562 r = read(v->fd, v->loc_src.data + v->loc_src.used, l);
1563 // ast_log(LOG_WARNING, "read %d of %d bytes from webcam\n", r, l);
1564 if (r < 0) /* read error */
1566 if (r == 0) /* no data */
1568 v->loc_src.used += r;
1570 v->loc_src.used = 0; /* prepare for next frame */
1571 return v->loc_src.size;
1576 /* Helper function to process incoming video.
1577 * For each incoming video call invoke ffmpeg_init() to initialize
1578 * the decoding structure then incoming video frames are processed
1579 * by write_video() which in turn calls pre_process_data(), to extract
1580 * the bitstream; accumulates data into a buffer within video_desc. When
1581 * a frame is complete (determined by the marker bit in the RTP header)
1582 * call decode_video() to decode it and, if successful, call show_frame()
1583 * to display the frame.
1587 * Table of translation between asterisk and ffmpeg formats.
1588 * We need also a field for read and write (encoding and decoding), because
1589 * e.g. H263+ uses different codec IDs in ffmpeg when encoding or decoding.
/* One row of the asterisk-format <-> ffmpeg-codec translation table.
 * 'rw' restricts the mapping to the encode and/or decode direction. */
1591 struct _cm { /* map ffmpeg codec types to asterisk formats */
1592 uint32_t ast_format; /* 0 is a terminator */
1594 enum { CM_RD = 1, CM_WR = 2, CM_RDWR = 3 } rw; /* read or write or both ? */
1595 struct video_codec_desc *codec_desc;
/* Translation table; note H263+ deliberately maps to different ffmpeg
 * codec IDs for decoding (CODEC_ID_H263) and encoding (CODEC_ID_H263P). */
1598 static struct _cm video_formats[] = {
1599 { AST_FORMAT_H263_PLUS, CODEC_ID_H263, CM_RD }, /* incoming H263P ? */
1600 { AST_FORMAT_H263_PLUS, CODEC_ID_H263P, CM_WR },
1601 { AST_FORMAT_H263, CODEC_ID_H263, CM_RD },
1602 { AST_FORMAT_H263, CODEC_ID_H263, CM_WR },
1603 { AST_FORMAT_H261, CODEC_ID_H261, CM_RDWR },
1604 { AST_FORMAT_H264, CODEC_ID_H264, CM_RDWR },
1605 { AST_FORMAT_MP4_VIDEO, CODEC_ID_MPEG4, CM_RDWR },
1610 /*! \brief map an asterisk format into an ffmpeg one */
1611 static enum CodecID map_video_format(uint32_t ast_format, int rw)
1615 for (i = video_formats; i->ast_format != 0; i++)
1616 if (ast_format & i->ast_format && rw & i->rw && rw & i->rw)
1618 return CODEC_ID_NONE;
1621 /* pointers to supported codecs. We assume the first one to be non null. */
/* NULL-terminated list scanned by map_video_codec() and
 * map_config_video_format(); entry 0 is also the fallback default. */
1622 static struct video_codec_desc *supported_codecs[] = {
1632 * Map the AST_FORMAT to the library. If not recognised, fail.
1633 * This is useful in the input path where we get frames.
/* Exact-match lookup (fmt == desc->format, not a bitmask test);
 * returns NULL when the format is not supported. */
1635 static struct video_codec_desc *map_video_codec(int fmt)
1639 for (i = 0; supported_codecs[i]; i++)
1640 if (fmt == supported_codecs[i]->format) {
1641 ast_log(LOG_WARNING, "using %s for format 0x%x\n",
1642 supported_codecs[i]->name, fmt);
1643 return supported_codecs[i];
1649 * Map the codec name to the library. If not recognised, use a default.
1650 * This is useful in the output path where we decide by name, presumably.
/* On a miss the first supported codec is used and its name is copied
 * back into 'name' (caller's buffer must be large enough - strcpy). */
1652 static struct video_codec_desc *map_config_video_format(char *name)
1656 for (i = 0; supported_codecs[i]; i++)
1657 if (!strcasecmp(name, supported_codecs[i]->name))
1659 if (supported_codecs[i] == NULL) {
1660 ast_log(LOG_WARNING, "Cannot find codec for '%s'\n", name);
1662 strcpy(name, supported_codecs[i]->name);
1664 ast_log(LOG_WARNING, "Using codec '%s'\n", name);
1665 return supported_codecs[i];
1668 /*! \brief uninitialize the descriptor for remote video stream */
/* Releases all ffmpeg decoder state and frame buffers and resets the
 * descriptor so that video_in_init() can be called again. Always
 * returns -1 so callers can 'return video_in_uninit(v);' on error. */
1669 static int video_in_uninit(struct video_in_desc *v)
1674 av_parser_close(v->parser);
1678 avcodec_close(v->dec_ctx);
1679 av_free(v->dec_ctx);
1683 av_free(v->d_frame);
1686 v->codec = NULL; /* only a reference */
1687 v->dec = NULL; /* forget the decoder */
1688 v->discard = 1; /* start in discard mode */
1689 for (i = 0; i < N_DEC_IN; i++)
1690 fbuf_free(&v->dec_in[i]);
1691 fbuf_free(&v->dec_out);
1692 fbuf_free(&v->rem_dpy);
1693 return -1; /* error, in case someone cares */
1697 * initialize ffmpeg resources used for decoding frames from the network.
/* Sets up decoder, codec context, bitstream parser and decode frame for
 * the given AST_FORMAT. Every failure path funnels through
 * video_in_uninit() so partial state is released and -1 is returned. */
1699 static int video_in_init(struct video_in_desc *v, uint32_t format)
1703 /* XXX should check that these are already set */
1710 codec = map_video_format(format, CM_RD);
1712 v->codec = avcodec_find_decoder(codec);
1714 ast_log(LOG_WARNING, "Unable to find the decoder for format %d\n", codec);
1715 return video_in_uninit(v);
1718 * Initialize the codec context.
1720 v->dec_ctx = avcodec_alloc_context();
1721 if (avcodec_open(v->dec_ctx, v->codec) < 0) {
1722 ast_log(LOG_WARNING, "Cannot open the codec context\n");
1723 av_free(v->dec_ctx);
1725 return video_in_uninit(v);
1728 v->parser = av_parser_init(codec);
1730 ast_log(LOG_WARNING, "Cannot initialize the decoder parser\n");
1731 return video_in_uninit(v);
1734 v->d_frame = avcodec_alloc_frame();
1736 ast_log(LOG_WARNING, "Cannot allocate decoding video frame\n");
1737 return video_in_uninit(v);
1742 /*! \brief uninitialize the descriptor for local video stream */
/* Releases encoder context and all outbound buffers, plus the X11
 * display connection when the X11 grabber was in use. */
1743 static int video_out_uninit(struct video_out_desc *v)
1746 avcodec_close(v->enc_ctx);
1747 av_free(v->enc_ctx);
1754 v->codec = NULL; /* only a reference */
1756 fbuf_free(&v->loc_src);
1757 fbuf_free(&v->enc_in);
1758 fbuf_free(&v->enc_out);
1759 fbuf_free(&v->loc_dpy);
1760 if (v->image) { /* X11 grabber */
1761 XCloseDisplay(v->dpy);
1773 * Initialize the encoder for the local source:
1774 * - AVCodecContext, AVCodec, AVFrame are used by ffmpeg for encoding;
1775 * - encbuf is used to store the encoded frame (to be sent)
1776 * - mtu is used to determine the max size of video fragment
1777 * NOTE: we enter here with the video source already open.
/* All error paths go through video_out_uninit() which returns -1. */
1779 static int video_out_init(struct video_desc *env)
1783 struct fbuf_t *enc_in;
1784 struct video_out_desc *v = &env->out;
1789 v->enc_out.data = NULL;
1791 if (v->loc_src.data == NULL) {
1792 ast_log(LOG_WARNING, "No local source active\n");
1793 return video_out_uninit(v);
1795 codec = map_video_format(v->enc->format, CM_WR);
1796 v->codec = avcodec_find_encoder(codec);
1798 ast_log(LOG_WARNING, "Cannot find the encoder for format %d\n",
1800 return video_out_uninit(v);
1803 v->mtu = 1400; /* set it early so the encoder can use it */
1805 /* allocate the input buffer for encoding.
1806 * ffmpeg only supports PIX_FMT_YUV420P for the encoding.
1808 enc_in = &v->enc_in;
1809 enc_in->pix_fmt = PIX_FMT_YUV420P;
/* YUV420P: full-res Y plane plus quarter-res U and V -> 3/2 bytes/px */
1810 enc_in->size = (enc_in->w * enc_in->h * 3)/2;
1811 enc_in->data = ast_calloc(1, enc_in->size);
1812 if (!enc_in->data) {
1813 ast_log(LOG_WARNING, "Cannot allocate encoder input buffer\n");
1814 return video_out_uninit(v);
1816 v->frame = avcodec_alloc_frame();
1818 ast_log(LOG_WARNING, "Unable to allocate the encoding video frame\n");
1819 return video_out_uninit(v);
1822 /* parameters for PIX_FMT_YUV420P */
1823 size = enc_in->w * enc_in->h;
1824 v->frame->data[0] = enc_in->data;
1825 v->frame->data[1] = v->frame->data[0] + size;
1826 v->frame->data[2] = v->frame->data[1] + size/4;
1827 v->frame->linesize[0] = enc_in->w;
1828 v->frame->linesize[1] = enc_in->w/2;
1829 v->frame->linesize[2] = enc_in->w/2;
1831 /* now setup the parameters for the encoder */
1832 v->enc_ctx = avcodec_alloc_context();
1833 v->enc_ctx->pix_fmt = enc_in->pix_fmt;
1834 v->enc_ctx->width = enc_in->w;
1835 v->enc_ctx->height = enc_in->h;
1836 /* XXX rtp_callback ?
1837 * rtp_mode so ffmpeg inserts as many start codes as possible.
1839 v->enc_ctx->rtp_mode = 1;
1840 v->enc_ctx->rtp_payload_size = v->mtu / 2; // mtu/2
1841 v->enc_ctx->bit_rate = v->bitrate;
1842 v->enc_ctx->bit_rate_tolerance = v->enc_ctx->bit_rate/2;
1843 v->enc_ctx->qmin = v->qmin; /* should be configured */
1844 v->enc_ctx->time_base = (AVRational){1, v->fps};
/* codec-specific tweaks (h261/mpeg4/h264 *_enc_init above) */
1846 v->enc->enc_init(v);
1848 if (avcodec_open(v->enc_ctx, v->codec) < 0) {
1849 ast_log(LOG_WARNING, "Unable to initialize the encoder %d\n",
1851 av_free(v->enc_ctx);
1853 return video_out_uninit(v);
1857 * Allocate enough for the encoded bitstream. As we are compressing,
1858 * we hope that the output is never larger than the input size.
1860 v->enc_out.data = ast_calloc(1, enc_in->size);
1861 v->enc_out.size = enc_in->size;
1862 v->enc_out.used = 0;
/*! \brief tear down the SDL/TTF rendering state: font, overlays,
 * keypad surface, screen reference and the decode-input lock. */
1867 static void cleanup_sdl(struct video_desc *env)
1872 /* unload font file */
1873 if (env->gui.font) {
1874 TTF_CloseFont(env->gui.font);
1875 env->gui.font = NULL;
1878 /* uninitialize SDL_ttf library */
1879 if ( TTF_WasInit() )
1883 /* uninitialize the SDL environment */
1884 for (i = 0; i < WIN_MAX; i++) {
1885 if (env->win[i].bmp)
1886 SDL_FreeYUVOverlay(env->win[i].bmp);
1888 if (env->gui.keypad)
1889 SDL_FreeSurface(env->gui.keypad);
1890 env->gui.keypad = NULL;
1892 env->screen = NULL; /* XXX check reference */
1893 bzero(env->win, sizeof(env->win));
1895 ast_mutex_destroy(&(env->in.dec_in_lock));
1898 /*! \brief uninitialize the entire environment.
1899 * In practice, signal the thread and give it a bit of time to
1900 * complete, giving up if it gets stuck. Because uninit
1901 * is called from hangup with the channel locked, and the thread
1902 * uses the chan lock, we need to unlock here. This is unsafe,
1903 * and we should really use refcounts for the channels.
1905 void console_video_uninit(struct video_desc *env)
1907 int i, t = 100; /* initial wait is shorter, than make it longer */
/* drop the channel lock while polling so the video thread can finish */
1909 for (i=0; env->shutdown && i < 10; i++) {
1910 ast_channel_unlock(env->owner);
1913 ast_channel_lock(env->owner);
1918 /*! fill an AVPicture from our fbuf info, as it is required by
1919 * the image conversion routines in ffmpeg.
1920 * XXX This depends on the format.
/* Returns 'p' filled with plane pointers/linesizes derived from the
 * fbuf geometry. Packed RGB/YUYV formats collapse into plane 0; the
 * planar default assumes YUV420P layout (Y then U then V). */
1922 static AVPicture *fill_pict(struct fbuf_t *b, AVPicture *p)
1924 /* provide defaults for commonly used formats */
1925 int l4 = b->w * b->h/4; /* size of U or V frame */
1926 int len = b->w; /* Y linesize, bytes */
1927 int luv = b->w/2; /* U/V linesize, bytes */
1929 bzero(p, sizeof(*p));
1930 switch (b->pix_fmt) {
1931 case PIX_FMT_RGB555:
1932 case PIX_FMT_RGB565:
1936 case PIX_FMT_RGBA32:
1940 case PIX_FMT_YUYV422: /* Packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr */
1941 len *= 2; /* all data in first plane, probably */
1945 p->data[0] = b->data;
1946 p->linesize[0] = len;
1947 /* these are only valid for component images */
1948 p->data[1] = luv ? b->data + 4*l4 : b->data+len;
1949 p->data[2] = luv ? b->data + 5*l4 : b->data+len;
1950 p->linesize[1] = luv;
1951 p->linesize[2] = luv;
1955 /*! convert/scale between an input and an output format.
1956 * Old version of ffmpeg only have img_convert, which does not rescale.
1957 * New versions use sws_scale which does both.
/* p_in/p_out may be NULL, in which case they are derived from the fbuf
 * descriptors via fill_pict(). The swscale context is created and
 * destroyed on every call - simple but not cheap. */
1959 static void my_scale(struct fbuf_t *in, AVPicture *p_in,
1960 struct fbuf_t *out, AVPicture *p_out)
1962 AVPicture my_p_in, my_p_out;
1965 p_in = fill_pict(in, &my_p_in);
1967 p_out = fill_pict(out, &my_p_out);
1970 /* XXX img_convert is deprecated, and does not do rescaling */
1971 img_convert(p_out, out->pix_fmt,
1972 p_in, in->pix_fmt, in->w, in->h);
1973 #else /* XXX replacement */
1975 struct SwsContext *convert_ctx;
1977 convert_ctx = sws_getContext(in->w, in->h, in->pix_fmt,
1978 out->w, out->h, out->pix_fmt,
1979 SWS_BICUBIC, NULL, NULL, NULL);
1980 if (convert_ctx == NULL) {
1981 ast_log(LOG_ERROR, "FFMPEG::convert_cmodel : swscale context initialization failed");
1985 ast_log(LOG_WARNING, "in %d %dx%d out %d %dx%d\n",
1986 in->pix_fmt, in->w, in->h, out->pix_fmt, out->w, out->h);
1987 sws_scale(convert_ctx,
1988 p_in->data, p_in->linesize,
1989 in->w, in->h, /* src slice */
1990 p_out->data, p_out->linesize);
1992 sws_freeContext(convert_ctx);
1994 #endif /* XXX replacement */
1998 * Display video frames (from local or remote stream) using the SDL library.
1999 * - Set the video mode to use the resolution specified by the codec context
2000 * - Create a YUV Overlay to copy the frame into it;
2001 * - After the frame is copied into the overlay, display it
2003 * The size is taken from the configuration.
2005 * 'out' is 0 for remote video, 1 for the local video
2007 static void show_frame(struct video_desc *env, int out)
2009 AVPicture *p_in, p_out;
2010 struct fbuf_t *b_in, *b_out;
2016 if (out == WIN_LOCAL) { /* webcam/x11 to sdl */
2017 b_in = &env->out.enc_in;
2018 b_out = &env->out.loc_dpy;
2021 /* copy input format from the decoding context */
2022 AVCodecContext *c = env->in.dec_ctx;
2023 b_in = &env->in.dec_out;
2024 b_in->pix_fmt = c->pix_fmt;
2026 b_in->h = c->height;
2028 b_out = &env->in.rem_dpy;
2029 p_in = (AVPicture *)env->in.d_frame;
2031 bmp = env->win[out].bmp;
2032 SDL_LockYUVOverlay(bmp);
2033 /* output picture info - this is sdl, YUV420P */
2034 bzero(&p_out, sizeof(p_out));
2035 p_out.data[0] = bmp->pixels[0];
2036 p_out.data[1] = bmp->pixels[1];
2037 p_out.data[2] = bmp->pixels[2];
2038 p_out.linesize[0] = bmp->pitches[0];
2039 p_out.linesize[1] = bmp->pitches[1];
2040 p_out.linesize[2] = bmp->pitches[2];
/* convert/rescale straight into the locked overlay */
2042 my_scale(b_in, p_in, b_out, &p_out);
2044 /* lock to protect access to Xlib by different threads. */
2045 SDL_DisplayYUVOverlay(bmp, &env->win[out].rect);
2046 SDL_UnlockYUVOverlay(bmp);
2049 struct video_desc *get_video_desc(struct ast_channel *c);
2052 * This function is called (by asterisk) for each video packet
2053 * coming from the network (the 'in' path) that needs to be processed.
2054 * We need to reconstruct the entire video frame before we can decode it.
2055 * After a video packet is received we have to:
2056 * - extract the bitstream with pre_process_data()
2057 * - append the bitstream to a buffer
2058 * - if the fragment is the last (RTP Marker) we decode it with decode_video()
2059 * - after the decoding is completed we display the decoded frame with show_frame()
2061 int console_write_video(struct ast_channel *chan, struct ast_frame *f);
2062 int console_write_video(struct ast_channel *chan, struct ast_frame *f)
2064 struct video_desc *env = get_video_desc(chan);
2065 struct video_in_desc *v = &env->in;
/* lazy codec binding: first packet determines the incoming format */
2067 if (v->dec == NULL) { /* try to get the codec */
2068 v->dec = map_video_codec(f->subclass & ~1);
2069 if (v->dec == NULL) {
2070 ast_log(LOG_WARNING, "cannot find video codec, drop input 0x%x\n", f->subclass);
2073 if (video_in_init(v, v->dec->format)) {
2074 /* This is not fatal, but we won't have incoming video */
2075 ast_log(LOG_WARNING, "Cannot initialize input decoder\n");
2080 if (v->dec_ctx == NULL) {
2081 ast_log(LOG_WARNING, "cannot decode, dropping frame\n");
2082 return 0; /* error */
2085 if (v->dec_in_cur == NULL) /* no buffer for incoming frames, drop */
2087 #if defined(DROP_PACKETS) && DROP_PACKETS > 0
2088 /* Simulate lost packets */
2089 if ((random() % 10000) <= 100*DROP_PACKETS) {
2090 ast_log(LOG_NOTICE, "Packet lost [%d]\n", f->seqno);
2096 * In discard mode, drop packets until we find one with
2097 * the RTP marker set (which is the end of frame).
2098 * Note that the RTP marker flag is sent as the LSB of the
2099 * subclass, which is a bitmask of formats. The low bit is
2100 * normally used for audio so there is no interference.
2102 if (f->subclass & 0x01) {
2103 v->dec_in_cur->used = 0;
2104 v->dec_in_cur->ebit = 0;
2105 v->next_seq = f->seqno + 1; /* wrap at 16 bit */
2107 ast_log(LOG_WARNING, "out of discard mode, frame %d\n", f->seqno);
2113 * Only in-order fragments will be accepted. Remember seqno
2114 * has 16 bit so there is wraparound. Also, ideally we could
2115 * accept a bit of reordering, but at the moment we don't.
2117 if (v->next_seq != f->seqno) {
2118 ast_log(LOG_WARNING, "discarding frame out of order, %d %d\n",
2119 v->next_seq, f->seqno);
2125 if (f->data == NULL || f->datalen < 2) {
2126 ast_log(LOG_WARNING, "empty video frame, discard\n");
2129 if (v->dec->dec_decap(v->dec_in_cur, f->data, f->datalen)) {
2130 ast_log(LOG_WARNING, "error in dec_decap, enter discard\n");
2133 if (f->subclass & 0x01) { // RTP Marker
2134 /* prepare to decode: advance the buffer so the video thread knows. */
2135 struct fbuf_t *tmp = v->dec_in_cur; /* store current pointer */
2136 ast_mutex_lock(&v->dec_in_lock);
2137 if (++v->dec_in_cur == &v->dec_in[N_DEC_IN]) /* advance to next, circular */
2138 v->dec_in_cur = &v->dec_in[0];
2139 if (v->dec_in_dpy == NULL) { /* were not displaying anything, so set it */
2140 v->dec_in_dpy = tmp;
2141 } else if (v->dec_in_dpy == v->dec_in_cur) { /* current slot is busy */
2142 v->dec_in_cur = NULL;
2144 ast_mutex_unlock(&v->dec_in_lock);
2150 /*! \brief read a frame from webcam or X11 through video_read(),
2151 * display it, then encode and split it.
2152 * Return a list of ast_frame representing the video fragments.
2153 * The head pointer is returned by the function, the tail pointer
2154 * is returned as an argument.
/* The static 'a' counters rate-limit the warning messages so a missing
 * source does not flood the log on every poll. */
2156 static struct ast_frame *get_video_frames(struct video_desc *env, struct ast_frame **tail)
2158 struct video_out_desc *v = &env->out;
2159 struct ast_frame *dummy;
2161 if (!v->loc_src.data) {
2162 static volatile int a = 0;
2164 ast_log(LOG_WARNING, "fail, no loc_src buffer\n");
2168 return NULL; /* can happen, e.g. we are reading too early */
2173 /* Scale the video for the encoder, then use it for local rendering
2174 * so we will see the same as the remote party.
2176 my_scale(&v->loc_src, NULL, &v->enc_in, NULL);
2177 show_frame(env, WIN_LOCAL);
2180 if (v->enc_out.data == NULL) {
2181 static volatile int a = 0;
2183 ast_log(LOG_WARNING, "fail, no encbuf\n");
/* codec-specific packetizer builds the outgoing frame list */
2187 return v->enc->enc_encap(v, tail);
2191 * GUI layout, structure and management
2194 For the GUI we use SDL to create a large surface (env->screen)
2195 containing three sections: remote video on the left, local video
2196 on the right, and the keypad with all controls and text windows
2198 The central section is built using two images: one is the skin,
2199 the other one is a mask where the sensitive areas of the skin
2200 are colored in different grayscale levels according to their
2201 functions. The mapping between colors and function is defined
2202 in the 'enum pixel_value' below.
2204 Mouse and keyboard events are detected on the whole surface, and
2205 handled differently according to their location, as follows:
2207 - drag on the local video window are used to move the captured
2208 area (in the case of X11 grabber) or the picture-in-picture
2209 location (in case of camera included on the X11 grab).
2210 - click on the keypad are mapped to the corresponding key;
2211 - drag on some keypad areas (sliders etc.) are mapped to the
2212 corresponding functions;
2213 - keystrokes are used as keypad functions, or as text input
2214 if we are in text-input mode.
2216 To manage this behavior we use two status variables
2217 that define whether keyboard events should be redirected to dialing functions
2218 or to write message functions, and if mouse events should be used
2219 to implement keypad functionalities or to drag the capture device.
2221 Configuration options control the appearance of the GUI:
2223 keypad = /tmp/phone.jpg ; the keypad on the screen
2224 keypad_mask = /tmp/phone.png ; the grayscale mask
2225 keypad_font = /tmp/font.ttf ; the font to use for output
2230 /* enumerate for the pixel value. 0..127 correspond to ascii chars */
/* Values above 127 name grayscale levels in the keypad mask image;
 * clicking a pixel with that value triggers the matching function. */
2232 /* answer/close functions */
2236 /* other functions */
2238 KEY_AUTOANSWER = 131,
2239 KEY_SENDVIDEO = 132,
2240 KEY_LOCALVIDEO = 133,
2241 KEY_REMOTEVIDEO = 134,
2242 KEY_WRITEMESSAGE = 135,
2243 KEY_GUI_CLOSE = 136, /* close gui */
2245 /* other areas within the keypad */
2246 KEY_DIGIT_BACKGROUND = 255,
2248 /* areas outside the keypad - simulated */
2249 KEY_OUT_OF_KEYPAD = 251,
2255 * Handlers for the various keypad functions
2258 /*! \brief append a character, or reset if '\0' */
/* str/str_pos implement a simple bounded input buffer of size
 * GUI_BUFFER_LEN; *str_pos is clamped so the buffer never overflows. */
2259 static void append_char(char *str, int *str_pos, const char c)
2264 else if (i < GUI_BUFFER_LEN - 1)
2267 i = GUI_BUFFER_LEN - 1; /* unnecessary, i think */
2272 /* accumulate digits, possibly call dial if in connected mode */
/* With an active call the digit is sent as DTMF on the channel;
 * otherwise it is buffered for a later dial command. */
2273 static void keypad_digit(struct video_desc *env, int digit)
2275 if (env->owner) { /* we have a call, send the digit */
2276 struct ast_frame f = { AST_FRAME_DTMF, 0 };
2279 ast_queue_frame(env->owner, &f);
2280 } else { /* no call, accumulate digits */
2281 append_char(env->gui.inbuf, &env->gui.inbuf_pos, digit);
2285 /* this is a wrapper for actions that are available through the cli */
2286 /* TODO append arg to command and send the resulting string as cli command */
/* Runs 'command' as a CLI command, directing output to the gui fd. */
2287 static void keypad_send_command(struct video_desc *env, char *command)
2289 ast_log(LOG_WARNING, "keypad_send_command(%s) called\n", command);
2290 ast_cli_command(env->gui.outfd, command);
2294 /* function used to toggle on/off the status of some variables */
/* 'index' is one of the KEY_* values; known toggles are send-video
 * (local flag) and auto-answer/mute (on the active chan_oss pvt). */
2295 static char *keypad_toggle(struct video_desc *env, int index)
2297 ast_log(LOG_WARNING, "keypad_toggle(%i) called\n", index);
2301 env->out.sendvideo = !env->out.sendvideo;
2305 struct chan_oss_pvt *o = find_desc(oss_active);
2309 case KEY_AUTOANSWER: {
2310 struct chan_oss_pvt *o = find_desc(oss_active);
2311 o->autoanswer = !o->autoanswer;
2319 char *console_do_answer(int fd);
2321 * Function called when the pick up button is pressed
2322 * perform actions according the channel status:
2324 * - if no one is calling us and no digits was pressed,
2325 * the operation have no effects,
2326 * - if someone is calling us we answer to the call.
2327 * - if we have no call in progress and we pressed some
2328 * digit, send the digit to the console.
2330 static void keypad_pick_up(struct video_desc *env)
2332 ast_log(LOG_WARNING, "keypad_pick_up called\n");
2334 if (env->owner) { /* someone is calling us, just answer */
2335 console_do_answer(-1);
2336 } else if (env->gui.inbuf_pos) { /* we have someone to call */
2337 ast_cli_command(env->gui.outfd, env->gui.inbuf);
/* '\0' resets the accumulated-digit buffer (see append_char) */
2340 append_char(env->gui.inbuf, &env->gui.inbuf_pos, '\0'); /* clear buffer */
2343 #if 0 /* still unused */
2345 * As an alternative to SDL_TTF, we can simply load the font from
2346 * an image and blit characters on the background of the GUI.
2348 * To generate a font we can use the 'fly' command with the
2349 * following script (3 lines with 32 chars each)
2354 string 255,255,255, 0, 0,giant, !"#$%&'()*+,-./0123456789:;<=>?
2355 string 255,255,255, 0,20,giant,@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
2356 string 255,255,255, 0,40,giant,`abcdefghijklmnopqrstuvwxyz{|}~
2361 /* Print given text on the gui */
/*
 * Render 'text' with SDL_ttf on top of the keypad window area and push the
 * result to the screen.
 * Returns 0 on success, 1 when SDL_ttf support is not compiled in; the
 * error-path return after a failed render is not visible in this listing.
 */
2362 static int gui_output(struct video_desc *env, const char *text)
2364 #ifndef HAVE_SDL_TTF
2365 return 1; /* error, not supported */
2367 int x = 30, y = 20; /* XXX change */
2368 SDL_Surface *output = NULL;
2369 SDL_Color color = {0, 0, 0}; /* text color */
2370 SDL_Rect dest = {env->win[WIN_KEYPAD].rect.x + x, y};
2372 /* clean surface each rewrite */
2373 SDL_BlitSurface(env->gui.keypad, NULL, env->screen, &env->win[WIN_KEYPAD].rect);
2375 output = TTF_RenderText_Solid(env->gui.font, text, color);
2376 if (output == NULL) {
2377 ast_log(LOG_WARNING, "Cannot render text on gui - %s\n", TTF_GetError());
2381 SDL_BlitSurface(output, NULL, env->screen, &dest);
	/* NOTE(review): SDL_UpdateRects expects the screen surface; here it is
	 * passed env->gui.keypad while every other call site uses env->screen --
	 * verify this is intentional. */
2383 SDL_UpdateRects(env->gui.keypad, 1, &env->win[WIN_KEYPAD].rect);
2384 SDL_FreeSurface(output);
2385 return 0; /* success */
2390 static int video_geom(struct fbuf_t *b, const char *s);
2391 static void sdl_setup(struct video_desc *env);
2392 static int kp_match_area(const struct keypad_entry *e, int x, int y);
2395 * Handle SDL_MOUSEBUTTONDOWN type, finding the palette
2396 * index value and calling the right callback.
2398 * x, y are referred to the upper left corner of the main SDL window.
/*
 * Dispatch a mouse click: map the click position to a region (remote video,
 * local video, keypad key, or outside), then run the associated action.
 * Layout assumption visible below: remote display on the left, keypad in
 * the middle, local display on the right.
 * NOTE(review): many lines are missing from this listing (switch headers,
 * case labels, breaks, the declaration of 'i' and 'buf') -- incomplete.
 */
2400 static void handle_button_event(struct video_desc *env, SDL_MouseButtonEvent button)
2402 uint8_t index = KEY_OUT_OF_KEYPAD; /* the key or region of the display we clicked on */
2404 /* for each click we come back in normal mode */
2405 env->gui.text_mode = 0;
2407 /* define keypad boundary */
2408 if (button.x < env->in.rem_dpy.w)
2409 index = KEY_REM_DPY; /* click on remote video */
2410 else if (button.x > env->in.rem_dpy.w + env->out.keypad_dpy.w)
2411 index = KEY_LOC_DPY; /* click on local video */
2412 else if (button.y > env->out.keypad_dpy.h)
2413 index = KEY_OUT_OF_KEYPAD; /* click outside the keypad */
2414 else if (env->gui.kp) {
	/* scan the configured keypad regions; x is made keypad-relative */
2416 for (i = 0; i < env->gui.kp_used; i++) {
2417 if (kp_match_area(&env->gui.kp[i], button.x - env->in.rem_dpy.w, button.y)) {
2418 index = env->gui.kp[i].c;
2424 /* exec the function */
2425 if (index < 128) { /* surely clicked on the keypad, don't care which key */
2426 keypad_digit(env, index);
2430 /* answer/close function */
2432 keypad_pick_up(env);
2435 keypad_send_command(env, "console hangup");
2438 /* other functions */
2440 case KEY_AUTOANSWER:
2442 keypad_toggle(env, index);
2445 case KEY_LOCALVIDEO:
2447 case KEY_REMOTEVIDEO:
2449 case KEY_WRITEMESSAGE:
2450 /* goes in text-mode */
2451 env->gui.text_mode = 1;
2455 /* press outside the keypad. right increases size, center decreases, left drags */
2458 if (button.button == SDL_BUTTON_LEFT) {
2459 if (index == KEY_LOC_DPY) {
2460 /* store points where the drag starts
2461 * and switch to drag mode */
2462 env->gui.x_drag = button.x;
2463 env->gui.y_drag = button.y;
2464 env->gui.drag_mode = 1;
	/* right/center click: grow or shrink the clicked display */
2469 struct fbuf_t *fb = index == KEY_LOC_DPY ? &env->out.loc_dpy : &env->in.rem_dpy;
2470 sprintf(buf, "%c%dx%d", button.button == SDL_BUTTON_RIGHT ? '>' : '<',
2472 video_geom(fb, buf);
2476 case KEY_OUT_OF_KEYPAD:
2482 case KEY_DIGIT_BACKGROUND:
2485 ast_log(LOG_WARNING, "function not yet defined %i\n", index);
2490 * Handle SDL_KEYDOWN type event, put the key pressed
2491 * in the dial buffer or in the text-message buffer,
2492 * depending on the text_mode variable value.
2494 * key is the SDLKey structure corresponding to the key pressed.
/*
 * In text mode, RETURN sends the accumulated message ("send text" CLI
 * command) and leaves text mode; any other key is appended to msgbuf.
 * Outside text mode keys accumulate in the dial buffer inbuf.
 */
2496 static void handle_keyboard_input(struct video_desc *env, SDLKey key)
2498 if (env->gui.text_mode) {
2499 /* append in the text-message buffer */
2500 if (key == SDLK_RETURN) {
2501 /* send the text message and return in normal mode */
2502 env->gui.text_mode = 0;
2503 keypad_send_command(env, "send text");
2505 /* accumulate the key in the message buffer */
2506 append_char(env->gui.msgbuf, &env->gui.msgbuf_pos, key);
2510 /* append in the dial buffer */
2511 append_char(env->gui.inbuf, &env->gui.inbuf_pos, key);
/*!
 * \brief Clamp a grab coordinate so it stays inside the X screen.
 * \param x the candidate grab coordinate
 * \param limit the largest value the coordinate may take
 * \return x clamped into the range [0, limit]
 */
static int boundary_checks(int x, int limit)
{
	if (x <= 0)
		return 0;
	if (x > limit)
		return limit;
	return x;
}
/*
 * Superlinear acceleration for drag movement: the farther the pointer has
 * moved, the bigger the step. Adds delta^2/100 to the raw delta,
 * preserving the sign of the movement.
 */
static int move_accel(int delta)
{
	int boost = delta * delta / 100;

	if (delta > 0)
		return delta + boost;
	return delta - boost;
}
2536 * Move the source of the captured video.
2538 * x_final_drag and y_final_drag are the coordinates where the drag ends,
2539 * start coordinates are in the gui_info structure.
/*
 * Translate a mouse drag into a new grab origin for the local video source,
 * applying superlinear acceleration (move_accel) and clamping the result to
 * the physical screen (boundary_checks).
 */
2541 static void move_capture_source(struct video_desc *env, int x_final_drag, int y_final_drag)
2543 int new_x, new_y; /* new coordinates for grabbing local video */
2544 int x = env->out.loc_src.x; /* old value */
2545 int y = env->out.loc_src.y; /* old value */
2547 /* move the origin */
2548 #define POLARITY -1 /* +1 or -1 depending on the desired direction */
2549 new_x = x + POLARITY*move_accel(x_final_drag - env->gui.x_drag) * 3;
2550 new_y = y + POLARITY*move_accel(y_final_drag - env->gui.y_drag) * 3;
2552 env->gui.x_drag = x_final_drag; /* update origin */
2553 env->gui.y_drag = y_final_drag;
2555 /* check boundary and let the source to grab from the new points */
2556 env->out.loc_src.x = boundary_checks(new_x, env->out.screen_width - env->out.loc_src.w);
2557 env->out.loc_src.y = boundary_checks(new_y, env->out.screen_height - env->out.loc_src.h);
2562 * I am seeing some kind of deadlock or stall around
2563 * SDL_PumpEvents() while moving the window on a remote X server
2564 * (both xfree-4.4.0 and xorg 7.2)
2565 * and windowmaker. It is unclear what causes it.
2568 /* grab a bunch of events */
/*
 * Drain the SDL event queue in batches of N_EVENTS and dispatch each event
 * to the keyboard/mouse handlers; drag mode turns mouse motion into calls
 * to move_capture_source and button-up ends the drag.
 * NOTE(review): listing is incomplete -- the declarations of 'i'/'n',
 * the SDL_KEYDOWN case label, breaks, and the SDL_PumpEvents call that the
 * timing code at the bottom measures are not visible.
 */
2569 static void eventhandler(struct video_desc *env)
2573 SDL_Event ev[N_EVENTS];
2575 #define MY_EV (SDL_MOUSEBUTTONDOWN|SDL_KEYDOWN)
2576 while ( (n = SDL_PeepEvents(ev, N_EVENTS, SDL_GETEVENT, SDL_ALLEVENTS)) > 0) {
2577 for (i = 0; i < n; i++) {
2579 ast_log(LOG_WARNING, "------ event %d at %d %d\n",
2580 ev[i].type, ev[i].button.x, ev[i].button.y);
2582 switch (ev[i].type) {
2584 handle_keyboard_input(env, ev[i].key.keysym.sym);
2586 case SDL_MOUSEMOTION:
2587 if (env->gui.drag_mode != 0)
2588 move_capture_source(env, ev[i].motion.x, ev[i].motion.y);
2590 case SDL_MOUSEBUTTONDOWN:
2591 handle_button_event(env, ev[i].button);
2593 case SDL_MOUSEBUTTONUP:
2594 if (env->gui.drag_mode != 0) {
2595 move_capture_source(env, ev[i].button.x, ev[i].button.y);
2596 env->gui.drag_mode = 0;
	/* timing instrumentation for the SDL_PumpEvents stall noted above */
2604 struct timeval b, a = ast_tvnow();
2606 //SDL_Lock_EventThread();
2609 i = ast_tvdiff_ms(b, a);
2611 fprintf(stderr, "-------- SDL_PumpEvents took %dms\n", i);
2612 //SDL_Unlock_EventThread();
/*
 * Load the keypad skin image from 'file': use SDL_image (IMG_Load, any
 * format) when available, otherwise fall back to SDL's built-in BMP loader.
 * NOTE(review): the declaration of 'temp', the branch structure and the
 * return statement are not visible in this listing.
 */
2616 static SDL_Surface *get_keypad(const char *file)
2620 #ifdef HAVE_SDL_IMAGE
2621 temp = IMG_Load(file);
2623 temp = SDL_LoadBMP(file);
2626 fprintf(stderr, "Unable to load image %s: %s\n",
2627 file, SDL_GetError());
2631 /* TODO: consistency checks, check for bpp, width and height */
2632 /* Init the mask image used to grab the action. */
/*
 * Initialize the GUI state: keypad/drag flags, grab origin, keyboard
 * buffers, SDL_ttf font, and the fd used to discard CLI output.
 * Returns 0 on success (inferred from the gui_ok = !gui_init(env) caller);
 * the error-path returns are not visible in this listing.
 */
2633 static int gui_init(struct video_desc *env)
2635 /* initialize keypad status */
2636 env->gui.text_mode = 0;
2637 env->gui.drag_mode = 0;
2639 /* initialize grab coordinates */
2640 env->out.loc_src.x = 0;
2641 env->out.loc_src.y = 0;
2643 /* initialize keyboard buffer */
2644 append_char(env->gui.inbuf, &env->gui.inbuf_pos, '\0');
2645 append_char(env->gui.msgbuf, &env->gui.msgbuf_pos, '\0');
2648 /* Initialize SDL_ttf library and load font */
2649 if (TTF_Init() == -1) {
2650 ast_log(LOG_WARNING, "Unable to init SDL_ttf, no output available\n")
2654 #define GUI_FONTSIZE 28
2655 env->gui.font = TTF_OpenFont( env->keypad_font, GUI_FONTSIZE);
2656 if (!env->gui.font) {
2657 ast_log(LOG_WARNING, "Unable to load font %s, no output available\n", env->keypad_font);
2660 ast_log(LOG_WARNING, "Loaded font %s\n", env->keypad_font);
2663 env->gui.outfd = open ("/dev/null", O_WRONLY); /* discard output, temporary */
2664 if ( env->gui.outfd < 0 ) {
2665 ast_log(LOG_WARNING, "Unable output fd\n");
2672 static void sdl_setup(struct video_desc *env);
2675 * Helper thread to periodically poll the video source and enqueue the
2676 * generated frames to the channel's queue.
2677 * Using a separate thread also helps because the encoding can be
2678 * computationally expensive so we don't want to starve the main thread.
/*
 * Thread body: one-time setup (SDL init, GUI init, open the local video
 * source, register its fd on the channel), then a ~20 Hz loop that updates
 * the window caption, handles GUI events, displays decoded remote frames,
 * and queues locally captured frames onto chan->readq, signalling via the
 * alertpipe. On exit the in/out video environments are uninitialized.
 * NOTE(review): this listing is incomplete -- the loop header, several
 * braces, 'count'/'buf' declarations and the eventhandler() call mentioned
 * by the comments are not visible.
 */
2680 static void *video_thread(void *arg)
2682 struct video_desc *env = arg;
2686 bzero(env->win, sizeof(env->win));
2688 if (SDL_Init(SDL_INIT_VIDEO)) {
2689 ast_log(LOG_WARNING, "Could not initialize SDL - %s\n",
2691 /* again not fatal, just we won't display anything */
2695 ast_mutex_init(&env->in.dec_in_lock);
2696 /* TODO, segfault if not X display present */
2697 env->gui_ok = !gui_init(env);
2699 ast_log(LOG_WARNING, "cannot init console gui\n");
2701 if (video_open(&env->out)) {
2702 ast_log(LOG_WARNING, "cannot open local video source\n");
2704 /* try to register the fd. Unfortunately, if the webcam
2705 * driver does not support select/poll we are out of luck.
2707 if (env->out.fd >= 0)
2708 ast_channel_set_fd(env->owner, 1, env->out.fd);
2709 video_out_init(env);
2713 /* XXX 20 times/sec */
2714 struct timeval t = { 0, 50000 };
2715 struct ast_frame *p, *f;
2716 struct video_in_desc *v = &env->in;
2717 struct ast_channel *chan = env->owner;
2718 int fd = chan->alertpipe[1];
2720 /* determine if video format changed */
2721 if (count++ % 10 == 0) {
	/* refresh the window caption with device/codec/geometry/rate info */
2723 if (env->out.sendvideo)
2724 sprintf(buf, "%s %s %dx%d @@ %dfps %dkbps",
2725 env->out.videodevice, env->codec_name,
2726 env->out.enc_in.w, env->out.enc_in.h,
2727 env->out.fps, env->out.bitrate/1000);
2729 sprintf(buf, "hold");
2730 SDL_WM_SetCaption(buf, NULL);
2733 /* manage keypad events */
2734 /* XXX here we should always check for events,
2735 * otherwise the drag will not work */
2739 /* sleep for a while */
2740 ast_select(0, NULL, NULL, NULL, &t);
2742 SDL_UpdateRects(env->screen, 1, &env->win[WIN_KEYPAD].rect);// XXX inefficient
2744 * While there is something to display, call the decoder and free
2745 * the buffer, possibly enabling the receiver to store new data.
2747 while (v->dec_in_dpy) {
2748 struct fbuf_t *tmp = v->dec_in_dpy; /* store current pointer */
2750 if (v->dec->dec_run(v, tmp))
2751 show_frame(env, WIN_REMOTE);
2752 tmp->used = 0; /* mark buffer as free */
2754 ast_mutex_lock(&v->dec_in_lock);
2755 if (++v->dec_in_dpy == &v->dec_in[N_DEC_IN]) /* advance to next, circular */
2756 v->dec_in_dpy = &v->dec_in[0];
2758 if (v->dec_in_cur == NULL) /* receiver was idle, enable it... */
2759 v->dec_in_cur = tmp; /* using the slot just freed */
2760 else if (v->dec_in_dpy == v->dec_in_cur) /* this was the last slot */
2761 v->dec_in_dpy = NULL; /* nothing more to display */
2762 ast_mutex_unlock(&v->dec_in_lock);
2766 f = get_video_frames(env, &p); /* read and display */
2772 ast_channel_lock(chan);
2774 /* AST_LIST_INSERT_TAIL is only good for one frame, cannot use here */
2775 if (chan->readq.first == NULL) {
2776 chan->readq.first = f;
2778 chan->readq.last->frame_list.next = f;
2780 chan->readq.last = p;
2782 * more or less same as ast_queue_frame, but extra
2783 * write on the alertpipe to signal frames.
2786 int blah = 1, l = sizeof(blah);
	/* one alertpipe write per queued frame, mirroring ast_queue_frame */
2787 for (p = f; p; p = AST_LIST_NEXT(p, frame_list)) {
2788 if (write(fd, &blah, l) != l)
2789 ast_log(LOG_WARNING, "Unable to write to alert pipe on %s, frametype/subclass %d/%d: %s!\n",
2790 chan->name, f->frametype, f->subclass, strerror(errno));
2793 ast_channel_unlock(chan);
2795 /* thread terminating, here could call the uninit */
2796 /* uninitialize the local and remote video environments */
2797 video_in_uninit(&env->in);
2798 video_out_uninit(&env->out);
/* Copy geometry from src to dst -- presumably the width/height fields used
 * as defaults by init_env; the body is not visible in this listing. */
2807 static void copy_geometry(struct fbuf_t *src, struct fbuf_t *dst)
2815 /*! initialize the video environment.
2816 * Apart from the formats (constant) used by sdl and the codec,
2817 * we use enc_in as the basic geometry.
/*
 * Set the default pixel format (YUV420P) on the camera source, encoder
 * input and both displays, then cascade default geometries from the
 * encoder input down to camera, remote display and local display.
 */
2819 static void init_env(struct video_desc *env)
2821 struct fbuf_t *c = &(env->out.loc_src); /* local source */
2822 struct fbuf_t *ei = &(env->out.enc_in); /* encoder input */
2823 struct fbuf_t *ld = &(env->out.loc_dpy); /* local display */
2824 struct fbuf_t *rd = &(env->in.rem_dpy); /* remote display */
2826 c->pix_fmt = PIX_FMT_YUV420P; /* default - camera format */
2827 ei->pix_fmt = PIX_FMT_YUV420P; /* encoder input */
2828 if (ei->w == 0 || ei->h == 0) {
2832 ld->pix_fmt = rd->pix_fmt = PIX_FMT_YUV420P; /* sdl format */
2833 /* inherit defaults */
2834 copy_geometry(ei, c); /* camera inherits from encoder input */
2835 copy_geometry(ei, rd); /* remote display inherits from encoder input */
2836 copy_geometry(rd, ld); /* local display inherits from remote display */
2839 /* setup an sdl overlay and associated info, return 0 on success, != 0 on error */
/*
 * Create a YUV overlay of w x h in format 'fmt' for a display window.
 * NOTE(review): the rect (x, y, w, h) assignments and the success return
 * are not visible in this listing.
 */
2840 static int set_win(SDL_Surface *screen, struct display_window *win, int fmt,
2841 int w, int h, int x, int y)
2843 win->bmp = SDL_CreateYUVOverlay(w, h, fmt, screen);
2844 if (win->bmp == NULL)
2845 return -1; /* error */
2854 * The first call to the video code, called by oss_new() or similar.
2855 * Here we initialize the various components we use, namely SDL for display,
2856 * ffmpeg for encoding/decoding, and a local video source.
2857 * We do our best to progress even if some of the components are not
/*
 * Entry point from the channel driver: validate env/owner, map the
 * configured codec, register ffmpeg codecs, apply fps/bitrate defaults and
 * finally spawn video_thread in the background.
 */
2860 void console_video_start(struct video_desc *env, struct ast_channel *owner)
2862 if (env == NULL) /* video not initialized */
2864 if (owner == NULL) /* nothing to do if we don't have a channel */
2868 env->out.enc = map_config_video_format(env->codec_name);
2870 ast_log(LOG_WARNING, "start video out %s %dx%d\n",
2871 env->codec_name, env->out.enc_in.w, env->out.enc_in.h);
2873 * Register all codecs supported by the ffmpeg library.
2874 * We only need to do it once, but probably doesn't
2875 * harm to do it multiple times.
2878 avcodec_register_all();
2879 av_log_set_level(AV_LOG_ERROR); /* only report errors */
2881 if (env->out.fps == 0) {
2883 ast_log(LOG_WARNING, "fps unset, forcing to %d\n", env->out.fps);
2885 if (env->out.bitrate == 0) {
2886 env->out.bitrate = 65000;
2887 ast_log(LOG_WARNING, "bitrate unset, forcing to %d\n", env->out.bitrate);
2890 ast_pthread_create_background(&env->vthread, NULL, video_thread, env);
2893 static int keypad_cfg_read(struct gui_info *gui, const char *val);
2894 /* [re]set the main sdl window, useful in case of resize */
/*
 * (Re)build the SDL screen: load the keypad skin, optionally parse
 * "keypad_entry" button definitions embedded in the image's comment field
 * (via mmap of the file), size the window to remote video + keypad + local
 * video, create the two YUV overlays and blit the keypad skin.
 * NOTE(review): this listing is incomplete (declarations of fd/l/p/buf/
 * maxw/maxh, several braces and error-path gotos are not visible).
 */
2895 static void sdl_setup(struct video_desc *env)
2897 int dpy_fmt = SDL_IYUV_OVERLAY; /* YV12 causes flicker in SDL */
2901 * initialize the SDL environment. We have one large window
2902 * with local and remote video, and a keypad.
2903 * At the moment we arrange them statically, as follows:
2904 * - on the left, the remote video;
2905 * - on the center, the keypad
2906 * - on the right, the local video
2909 /* Fetch the keypad now, we need it to know its size */
2910 if (!env->gui.keypad)
2911 env->gui.keypad = get_keypad(env->keypad_file);
2912 if (env->gui.keypad) {
2917 env->out.keypad_dpy.w = env->gui.keypad->w;
2918 env->out.keypad_dpy.h = env->gui.keypad->h;
2920 * If the keypad image has a comment field, try to read
2921 * the button location from there. The block must be
2922 * keypad_entry = token shape x0 y0 x1 y1 h
2924 * (basically, lines have the same format as config file entries.
2925 * same as the keypad_entry.
2926 * You can add it to a jpeg file using wrjpgcom
2928 do { /* only once, in fact */
2929 const unsigned char *s, *e;
2931 fd = open(env->keypad_file, O_RDONLY);
2933 ast_log(LOG_WARNING, "fail to open %s\n", env->keypad_file);
2936 l = lseek(fd, 0, SEEK_END);
2938 ast_log(LOG_WARNING, "fail to lseek %s\n", env->keypad_file);
	/* NOTE(review): the mmap flags argument is 0; POSIX requires
	 * MAP_SHARED or MAP_PRIVATE -- verify this works on target OS. */
2941 p = mmap(NULL, l, PROT_READ, 0, fd, 0);
2943 ast_log(LOG_WARNING, "fail to mmap %s size %ld\n", env->keypad_file, (long)l);
	/* scan the mapped file for the "keypad_entry" keyword */
2946 e = (const unsigned char *)p + l;
2947 for (s = p; s < e - 20 ; s++) {
2948 if (!memcmp(s, "keypad_entry", 12)) { /* keyword found */
2949 ast_log(LOG_WARNING, "found entry\n");
2953 for ( ;s < e - 20; s++) {
2955 const unsigned char *s1;
2956 if (index(" \t\r\n", *s)) /* ignore blanks */
2958 if (*s > 127) /* likely end of comment */
2960 if (memcmp(s, "keypad_entry", 12)) /* keyword not found */
2963 l = MIN(sizeof(buf), e - s);
2964 ast_copy_string(buf, s, l);
2965 s1 = ast_skip_blanks(buf); /* between token and '=' */
2966 if (*s1++ != '=') /* missing separator */
2968 if (*s1 == '>') /* skip => */
2970 keypad_cfg_read(&env->gui, ast_skip_blanks(s1));
2971 /* now wait for a newline */
2973 while (s1 < e - 20 && !index("\r\n", *s1) && *s1 < 128)
2983 #define BORDER 5 /* border around our windows */
2984 maxw = env->in.rem_dpy.w + env->out.loc_dpy.w + env->out.keypad_dpy.w;
2985 maxh = MAX( MAX(env->in.rem_dpy.h, env->out.loc_dpy.h), env->out.keypad_dpy.h);
2988 env->screen = SDL_SetVideoMode(maxw, maxh, 0, 0);
2990 ast_log(LOG_ERROR, "SDL: could not set video mode - exiting\n");
2994 SDL_WM_SetCaption("Asterisk console Video Output", NULL);
2995 if (set_win(env->screen, &env->win[WIN_REMOTE], dpy_fmt,
2996 env->in.rem_dpy.w, env->in.rem_dpy.h, BORDER, BORDER))
2998 if (set_win(env->screen, &env->win[WIN_LOCAL], dpy_fmt,
2999 env->out.loc_dpy.w, env->out.loc_dpy.h,
3000 3*BORDER+env->in.rem_dpy.w + env->out.keypad_dpy.w, BORDER))
3003 /* display the skin, but do not free it as we need it later to
3004 * restore text areas and maybe sliders too.
3006 if (env->gui.keypad) {
3007 struct SDL_Rect *dest = &env->win[WIN_KEYPAD].rect;
3008 dest->x = 2*BORDER + env->in.rem_dpy.w;
3010 dest->w = env->gui.keypad->w;
3011 dest->h = env->gui.keypad->h;
3012 SDL_BlitSurface(env->gui.keypad, NULL, env->screen, dest);
3013 SDL_UpdateRects(env->screen, 1, dest);
3015 env->in.dec_in_cur = &env->in.dec_in[0];
3016 env->in.dec_in_dpy = NULL; /* nothing to display */
3020 if (env->sdl_ok == 0) /* free resources in case of errors */
3025 * Parse a geometry string, accepting also common names for the formats.
3026 * Trick: if we have a leading > or < and a numeric geometry,
3027 * return the larger or smaller one.
3028 * E.g. <352x288 gives the smaller one, 320x240
/*
 * Fill b->w/b->h from 's': either a named format (cif, qvga, ...), an
 * explicit WxH, or <WxH / >WxH to step to the next smaller/larger entry in
 * the formats table. Falls back to 352x288 on invalid input.
 * NOTE(review): listing is incomplete -- 'w'/'h' declarations, the cif
 * table entry, assignments to b->w/b->h and the return are not visible.
 */
3030 static int video_geom(struct fbuf_t *b, const char *s)
3035 const char *s; int w; int h;
3036 } *fp, formats[] = {
3039 {"qvga", 320, 240 },
3040 {"qcif", 176, 144 },
3041 {"sqcif", 128, 96 },
3044 if (*s == '<' || *s == '>')
3045 sscanf(s+1,"%dx%d", &w, &h);
3046 for (fp = formats; fp->s; fp++) {
3047 if (*s == '>') { /* look for a larger one */
3050 fp--; /* back one step if possible */
3053 } else if (*s == '<') { /* look for a smaller one */
3056 } else if (!strcasecmp(s, fp->s)) { /* look for a string */
3060 if (*s == '<' && fp->s == NULL) /* smallest */
3065 } else if (sscanf(s, "%dx%d", &b->w, &b->h) != 2) {
3066 ast_log(LOG_WARNING, "Invalid video_size %s, using 352x288\n", s);
3074 * Functions to determine if a point is within a region. Return 1 if success.
3075 * First rotate the point, with
3076 * x' = (x - x0) * cos A + (y - y0) * sin A
3077 * y' = -(x - x0) * sin A + (y - y0) * cos A
3078 * where cos A = (x1-x0)/l, sin A = (y1 - y0)/l, and
3079 * l = sqrt( (x1-x0)^2 + (y1-y0)^2
3080 * Then determine inclusion by simple comparisons i.e.:
3081 * rectangle: x >= 0 && x < l && y >= 0 && y < h
3082 * ellipse: (x-xc)^2/l^2 + (y-yc)^2/h2 < 1
3084 static int kp_match_area(const struct keypad_entry *e, int x, int y)
3086 double xp, dx = (e->x1 - e->x0);
3087 double yp, dy = (e->y1 - e->y0);
3088 double l = sqrt(dx*dx + dy*dy);
3091 if (l > 1) { /* large enough */
3092 xp = ((x - e->x0)*dx + (y - e->y0)*dy)/l;
3093 yp = (-(x - e->x0)*dy + (y - e->y0)*dx)/l;
3094 if (e->type == KP_RECT) {
3095 ret = (xp >= 0 && xp < l && yp >=0 && yp < l);
3096 } else if (e->type == KP_CIRCLE) {
3097 dx = xp*xp/(l*l) + yp*yp/(e->h*e->h);
3102 ast_log(LOG_WARNING, "result %d [%d] for match %d,%d in type %d p0 %d,%d p1 %d,%d h %d\n",
3103 ret, e->c, x, y, e->type, e->x0, e->y0, e->x1, e->y1, e->h);
3109 * read a keypad entry line in the format
3111 * token circle xc yc diameter
3112 * token circle xc yc x1 y1 h # ellipse, main diameter and height
3113 * token rect x0 y0 x1 y1 h # rectangle with main side and height
3114 * token is the token to be returned, either a character or a symbol
/* Map symbolic button names (used in keypad_entry definitions) to the KEY_*
 * codes dispatched by handle_button_event. The terminating sentinel entry
 * is not visible in this listing. */
3117 struct _s_k { const char *s; int k; };
3118 static struct _s_k gui_key_map[] = {
3119 {"PICK_UP", KEY_PICK_UP },
3120 {"PICKUP", KEY_PICK_UP },
3121 {"HANG_UP", KEY_HANG_UP },
3122 {"HANGUP", KEY_HANG_UP },
3123 {"MUTE", KEY_MUTE },
3124 {"AUTOANSWER", KEY_AUTOANSWER },
3125 {"SENDVIDEO", KEY_SENDVIDEO },
3126 {"LOCALVIDEO", KEY_LOCALVIDEO },
3127 {"REMOTEVIDEO", KEY_REMOTEVIDEO },
3128 {"WRITEMESSAGE", KEY_WRITEMESSAGE },
3129 {"GUI_CLOSE", KEY_GUI_CLOSE },
3132 static int keypad_cfg_read(struct gui_info *gui, const char *val)
3134 struct keypad_entry e;
3135 char s1[16], s2[16];
3138 bzero(&e, sizeof(e));
3139 i = sscanf(val, "%14s %14s %d %d %d %d %d",
3140 s1, s2, &e.x0, &e.y0, &e.x1, &e.y1, &e.h);