summaryrefslogtreecommitdiff
path: root/ffmpeg/libavdevice
diff options
context:
space:
mode:
Diffstat (limited to 'ffmpeg/libavdevice')
-rw-r--r--ffmpeg/libavdevice/Makefile22
-rw-r--r--ffmpeg/libavdevice/alldevices.c7
-rw-r--r--ffmpeg/libavdevice/alsa-audio-enc.c13
-rw-r--r--ffmpeg/libavdevice/alsa-audio.h1
-rw-r--r--ffmpeg/libavdevice/bktr.c5
-rw-r--r--ffmpeg/libavdevice/dshow.c4
-rw-r--r--ffmpeg/libavdevice/dshow_pin.c2
-rw-r--r--ffmpeg/libavdevice/dv1394.c3
-rw-r--r--ffmpeg/libavdevice/fbdev.c270
-rw-r--r--ffmpeg/libavdevice/jack_audio.c4
-rw-r--r--ffmpeg/libavdevice/lavfi.c28
-rw-r--r--ffmpeg/libavdevice/libavdevice.pc8
-rw-r--r--ffmpeg/libavdevice/libcdio.c8
-rw-r--r--ffmpeg/libavdevice/oss_audio.c5
-rw-r--r--ffmpeg/libavdevice/pulse.c190
-rw-r--r--ffmpeg/libavdevice/sdl.c266
-rw-r--r--ffmpeg/libavdevice/timefilter.c12
-rw-r--r--ffmpeg/libavdevice/timefilter.h2
-rw-r--r--ffmpeg/libavdevice/v4l.c1
-rw-r--r--ffmpeg/libavdevice/v4l2.c248
-rw-r--r--ffmpeg/libavdevice/version.h6
-rw-r--r--ffmpeg/libavdevice/vfwcap.c2
-rw-r--r--ffmpeg/libavdevice/x11grab.c42
23 files changed, 415 insertions, 734 deletions
diff --git a/ffmpeg/libavdevice/Makefile b/ffmpeg/libavdevice/Makefile
index efffa8b..531818a 100644
--- a/ffmpeg/libavdevice/Makefile
+++ b/ffmpeg/libavdevice/Makefile
@@ -21,27 +21,43 @@ OBJS-$(CONFIG_DSHOW_INDEV) += dshow.o dshow_enummediatypes.o \
dshow_enumpins.o dshow_filter.o \
dshow_pin.o dshow_common.o
OBJS-$(CONFIG_DV1394_INDEV) += dv1394.o
-OBJS-$(CONFIG_FBDEV_INDEV) += fbdev.o
+OBJS-$(CONFIG_FBDEV_INDEV) += fbdev_dec.o \
+ fbdev_common.o
+OBJS-$(CONFIG_FBDEV_OUTDEV) += fbdev_enc.o \
+ fbdev_common.o
OBJS-$(CONFIG_IEC61883_INDEV) += iec61883.o
OBJS-$(CONFIG_JACK_INDEV) += jack_audio.o timefilter.o
OBJS-$(CONFIG_LAVFI_INDEV) += lavfi.o
OBJS-$(CONFIG_OPENAL_INDEV) += openal-dec.o
OBJS-$(CONFIG_OSS_INDEV) += oss_audio.o
OBJS-$(CONFIG_OSS_OUTDEV) += oss_audio.o
-OBJS-$(CONFIG_PULSE_INDEV) += pulse.o
+OBJS-$(CONFIG_PULSE_INDEV) += pulse_audio_dec.o \
+ pulse_audio_common.o
+OBJS-$(CONFIG_PULSE_OUTDEV) += pulse_audio_enc.o \
+ pulse_audio_common.o
OBJS-$(CONFIG_SDL_OUTDEV) += sdl.o
OBJS-$(CONFIG_SNDIO_INDEV) += sndio_common.o sndio_dec.o
OBJS-$(CONFIG_SNDIO_OUTDEV) += sndio_common.o sndio_enc.o
-OBJS-$(CONFIG_V4L2_INDEV) += v4l2.o timefilter.o
+OBJS-$(CONFIG_V4L2_INDEV) += v4l2.o v4l2-common.o timefilter.o
+OBJS-$(CONFIG_V4L2_OUTDEV) += v4l2enc.o v4l2-common.o
OBJS-$(CONFIG_V4L_INDEV) += v4l.o
OBJS-$(CONFIG_VFWCAP_INDEV) += vfwcap.o
OBJS-$(CONFIG_X11GRAB_INDEV) += x11grab.o
+OBJS-$(CONFIG_XV_OUTDEV) += xv.o
# external libraries
OBJS-$(CONFIG_LIBCDIO_INDEV) += libcdio.o
OBJS-$(CONFIG_LIBDC1394_INDEV) += libdc1394.o
+OBJS-$(HAVE_LIBC_MSVCRT) += file_open.o
+
+# Windows resource file
+SLIBOBJS-$(HAVE_GNU_WINDRES) += avdeviceres.o
+
SKIPHEADERS-$(CONFIG_DSHOW_INDEV) += dshow_capture.h
+SKIPHEADERS-$(CONFIG_LIBPULSE) += pulse_audio_common.h
+SKIPHEADERS-$(CONFIG_V4L2_INDEV) += v4l2-common.h
+SKIPHEADERS-$(CONFIG_V4L2_OUTDEV) += v4l2-common.h
SKIPHEADERS-$(HAVE_ALSA_ASOUNDLIB_H) += alsa-audio.h
SKIPHEADERS-$(HAVE_SNDIO_H) += sndio_common.h
diff --git a/ffmpeg/libavdevice/alldevices.c b/ffmpeg/libavdevice/alldevices.c
index daa6638..5178f30 100644
--- a/ffmpeg/libavdevice/alldevices.c
+++ b/ffmpeg/libavdevice/alldevices.c
@@ -51,19 +51,20 @@ void avdevice_register_all(void)
REGISTER_OUTDEV (CACA, caca);
REGISTER_INDEV (DSHOW, dshow);
REGISTER_INDEV (DV1394, dv1394);
- REGISTER_INDEV (FBDEV, fbdev);
+ REGISTER_INOUTDEV(FBDEV, fbdev);
REGISTER_INDEV (IEC61883, iec61883);
REGISTER_INDEV (JACK, jack);
REGISTER_INDEV (LAVFI, lavfi);
REGISTER_INDEV (OPENAL, openal);
REGISTER_INOUTDEV(OSS, oss);
- REGISTER_INDEV (PULSE, pulse);
+ REGISTER_INOUTDEV(PULSE, pulse);
REGISTER_OUTDEV (SDL, sdl);
REGISTER_INOUTDEV(SNDIO, sndio);
- REGISTER_INDEV (V4L2, v4l2);
+ REGISTER_INOUTDEV(V4L2, v4l2);
// REGISTER_INDEV (V4L, v4l
REGISTER_INDEV (VFWCAP, vfwcap);
REGISTER_INDEV (X11GRAB, x11grab);
+ REGISTER_OUTDEV (XV, xv);
/* external libraries */
REGISTER_INDEV (LIBCDIO, libcdio);
diff --git a/ffmpeg/libavdevice/alsa-audio-enc.c b/ffmpeg/libavdevice/alsa-audio-enc.c
index 0f4e4a2..83e1d2f 100644
--- a/ffmpeg/libavdevice/alsa-audio-enc.c
+++ b/ffmpeg/libavdevice/alsa-audio-enc.c
@@ -47,12 +47,17 @@
static av_cold int audio_write_header(AVFormatContext *s1)
{
AlsaData *s = s1->priv_data;
- AVStream *st;
+ AVStream *st = NULL;
unsigned int sample_rate;
enum AVCodecID codec_id;
int res;
+ if (s1->nb_streams != 1 || s1->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
+ av_log(s1, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
+ return AVERROR(EINVAL);
+ }
st = s1->streams[0];
+
sample_rate = st->codec->sample_rate;
codec_id = st->codec->codec_id;
res = ff_alsa_open(s1, SND_PCM_STREAM_PLAYBACK, &sample_rate,
@@ -80,6 +85,10 @@ static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
uint8_t *buf = pkt->data;
size /= s->frame_size;
+ if (pkt->dts != AV_NOPTS_VALUE)
+ s->timestamp = pkt->dts;
+ s->timestamp += pkt->duration ? pkt->duration : size;
+
if (s->reorder_func) {
if (size > s->reorder_buf_size)
if (ff_alsa_extend_reorder_buf(s, size))
@@ -112,7 +121,7 @@ audio_get_output_timestamp(AVFormatContext *s1, int stream,
snd_pcm_sframes_t delay = 0;
*wall = av_gettime();
snd_pcm_delay(s->h, &delay);
- *dts = s1->streams[0]->cur_dts - delay;
+ *dts = s->timestamp - delay;
}
AVOutputFormat ff_alsa_muxer = {
diff --git a/ffmpeg/libavdevice/alsa-audio.h b/ffmpeg/libavdevice/alsa-audio.h
index 44b7c72..583c911 100644
--- a/ffmpeg/libavdevice/alsa-audio.h
+++ b/ffmpeg/libavdevice/alsa-audio.h
@@ -57,6 +57,7 @@ typedef struct AlsaData {
void (*reorder_func)(const void *, void *, int);
void *reorder_buf;
int reorder_buf_size; ///< in frames
+ int64_t timestamp; ///< current timestamp, without latency applied.
} AlsaData;
/**
diff --git a/ffmpeg/libavdevice/bktr.c b/ffmpeg/libavdevice/bktr.c
index b0dbe60..4e25aa6 100644
--- a/ffmpeg/libavdevice/bktr.c
+++ b/ffmpeg/libavdevice/bktr.c
@@ -25,6 +25,7 @@
*/
#include "libavformat/internal.h"
+#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
@@ -135,11 +136,11 @@ static av_cold int bktr_init(const char *video_device, int width, int height,
act.sa_handler = catchsignal;
sigaction(SIGUSR1, &act, &old);
- *tuner_fd = open("/dev/tuner0", O_RDONLY);
+ *tuner_fd = avpriv_open("/dev/tuner0", O_RDONLY);
if (*tuner_fd < 0)
av_log(NULL, AV_LOG_ERROR, "Warning. Tuner not opened, continuing: %s\n", strerror(errno));
- *video_fd = open(video_device, O_RDONLY);
+ *video_fd = avpriv_open(video_device, O_RDONLY);
if (*video_fd < 0) {
av_log(NULL, AV_LOG_ERROR, "%s: %s\n", video_device, strerror(errno));
return -1;
diff --git a/ffmpeg/libavdevice/dshow.c b/ffmpeg/libavdevice/dshow.c
index 8cc12f5..5293d26 100644
--- a/ffmpeg/libavdevice/dshow.c
+++ b/ffmpeg/libavdevice/dshow.c
@@ -183,7 +183,7 @@ static char *dup_wchar_to_utf8(wchar_t *w)
static int shall_we_drop(AVFormatContext *s)
{
struct dshow_ctx *ctx = s->priv_data;
- const uint8_t dropscore[] = {62, 75, 87, 100};
+ static const uint8_t dropscore[] = {62, 75, 87, 100};
const int ndropscores = FF_ARRAY_ELEMS(dropscore);
unsigned int buffer_fullness = (ctx->curbufsize*100)/s->max_picture_buffer;
@@ -1059,7 +1059,7 @@ static int dshow_read_packet(AVFormatContext *s, AVPacket *pkt)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "video_size", "set video size given a string such as 640x480 or hd720.", OFFSET(requested_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
- { "pixel_format", "set video pixel format", OFFSET(pixel_format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_NONE}, -1, AV_PIX_FMT_NB-1, DEC },
+ { "pixel_format", "set video pixel format", OFFSET(pixel_format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_NONE}, -1, INT_MAX, DEC },
{ "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ "sample_rate", "set audio sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
{ "sample_size", "set audio sample size", OFFSET(sample_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 16, DEC },
diff --git a/ffmpeg/libavdevice/dshow_pin.c b/ffmpeg/libavdevice/dshow_pin.c
index 4953642..30e4d95 100644
--- a/ffmpeg/libavdevice/dshow_pin.c
+++ b/ffmpeg/libavdevice/dshow_pin.c
@@ -358,5 +358,5 @@ libAVMemInputPin_Destroy(libAVMemInputPin *this)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
dshowdebug("libAVMemInputPin_Destroy(%p)\n", this);
- return libAVPin_Destroy(pin);
+ libAVPin_Destroy(pin);
}
diff --git a/ffmpeg/libavdevice/dv1394.c b/ffmpeg/libavdevice/dv1394.c
index 5d94f5c..0af5ea5 100644
--- a/ffmpeg/libavdevice/dv1394.c
+++ b/ffmpeg/libavdevice/dv1394.c
@@ -27,6 +27,7 @@
#include <sys/ioctl.h>
#include <sys/mman.h>
+#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avdevice.h"
@@ -88,7 +89,7 @@ static int dv1394_read_header(AVFormatContext * context)
goto failed;
/* Open and initialize DV1394 device */
- dv->fd = open(context->filename, O_RDONLY);
+ dv->fd = avpriv_open(context->filename, O_RDONLY);
if (dv->fd < 0) {
av_log(context, AV_LOG_ERROR, "Failed to open DV interface: %s\n", strerror(errno));
goto failed;
diff --git a/ffmpeg/libavdevice/fbdev.c b/ffmpeg/libavdevice/fbdev.c
deleted file mode 100644
index 30595bd..0000000
--- a/ffmpeg/libavdevice/fbdev.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2011 Stefano Sabatini
- * Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
- * Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * Linux framebuffer input device,
- * inspired by code from fbgrab.c by Gunnar Monell.
- * @see http://linux-fbdev.sourceforge.net/
- */
-
-/* #define DEBUG */
-
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <time.h>
-#include <linux/fb.h>
-
-#include "libavutil/log.h"
-#include "libavutil/mem.h"
-#include "libavutil/opt.h"
-#include "libavutil/time.h"
-#include "libavutil/parseutils.h"
-#include "libavutil/pixdesc.h"
-#include "avdevice.h"
-#include "libavformat/internal.h"
-
-struct rgb_pixfmt_map_entry {
- int bits_per_pixel;
- int red_offset, green_offset, blue_offset, alpha_offset;
- enum AVPixelFormat pixfmt;
-};
-
-static const struct rgb_pixfmt_map_entry rgb_pixfmt_map[] = {
- // bpp, red_offset, green_offset, blue_offset, alpha_offset, pixfmt
- { 32, 0, 8, 16, 24, AV_PIX_FMT_RGBA },
- { 32, 16, 8, 0, 24, AV_PIX_FMT_BGRA },
- { 32, 8, 16, 24, 0, AV_PIX_FMT_ARGB },
- { 32, 3, 2, 8, 0, AV_PIX_FMT_ABGR },
- { 24, 0, 8, 16, 0, AV_PIX_FMT_RGB24 },
- { 24, 16, 8, 0, 0, AV_PIX_FMT_BGR24 },
-};
-
-static enum AVPixelFormat get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(rgb_pixfmt_map); i++) {
- const struct rgb_pixfmt_map_entry *entry = &rgb_pixfmt_map[i];
- if (entry->bits_per_pixel == varinfo->bits_per_pixel &&
- entry->red_offset == varinfo->red.offset &&
- entry->green_offset == varinfo->green.offset &&
- entry->blue_offset == varinfo->blue.offset)
- return entry->pixfmt;
- }
-
- return AV_PIX_FMT_NONE;
-}
-
-typedef struct {
- AVClass *class; ///< class for private options
- int frame_size; ///< size in bytes of a grabbed frame
- AVRational framerate_q; ///< framerate
- char *framerate; ///< framerate string set by a private option
- int64_t time_frame; ///< time for the next frame to output (in 1/1000000 units)
-
- int fd; ///< framebuffer device file descriptor
- int width, height; ///< assumed frame resolution
- int frame_linesize; ///< linesize of the output frame, it is assumed to be constant
- int bytes_per_pixel;
-
- struct fb_var_screeninfo varinfo; ///< variable info;
- struct fb_fix_screeninfo fixinfo; ///< fixed info;
-
- uint8_t *data; ///< framebuffer data
-} FBDevContext;
-
-static av_cold int fbdev_read_header(AVFormatContext *avctx)
-{
- FBDevContext *fbdev = avctx->priv_data;
- AVStream *st = NULL;
- enum AVPixelFormat pix_fmt;
- int ret, flags = O_RDONLY;
-
- ret = av_parse_video_rate(&fbdev->framerate_q, fbdev->framerate);
- if (ret < 0) {
- av_log(avctx, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", fbdev->framerate);
- return ret;
- }
-
- if (!(st = avformat_new_stream(avctx, NULL)))
- return AVERROR(ENOMEM);
- avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in microseconds */
-
- /* NONBLOCK is ignored by the fbdev driver, only set for consistency */
- if (avctx->flags & AVFMT_FLAG_NONBLOCK)
- flags |= O_NONBLOCK;
-
- if ((fbdev->fd = open(avctx->filename, flags)) == -1) {
- ret = AVERROR(errno);
- av_log(avctx, AV_LOG_ERROR,
- "Could not open framebuffer device '%s': %s\n",
- avctx->filename, strerror(ret));
- return ret;
- }
-
- if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
- ret = AVERROR(errno);
- av_log(avctx, AV_LOG_ERROR,
- "FBIOGET_VSCREENINFO: %s\n", strerror(errno));
- goto fail;
- }
-
- if (ioctl(fbdev->fd, FBIOGET_FSCREENINFO, &fbdev->fixinfo) < 0) {
- ret = AVERROR(errno);
- av_log(avctx, AV_LOG_ERROR,
- "FBIOGET_FSCREENINFO: %s\n", strerror(errno));
- goto fail;
- }
-
- pix_fmt = get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
- if (pix_fmt == AV_PIX_FMT_NONE) {
- ret = AVERROR(EINVAL);
- av_log(avctx, AV_LOG_ERROR,
- "Framebuffer pixel format not supported.\n");
- goto fail;
- }
-
- fbdev->width = fbdev->varinfo.xres;
- fbdev->height = fbdev->varinfo.yres;
- fbdev->bytes_per_pixel = (fbdev->varinfo.bits_per_pixel + 7) >> 3;
- fbdev->frame_linesize = fbdev->width * fbdev->bytes_per_pixel;
- fbdev->frame_size = fbdev->frame_linesize * fbdev->height;
- fbdev->time_frame = AV_NOPTS_VALUE;
- fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_READ, MAP_SHARED, fbdev->fd, 0);
- if (fbdev->data == MAP_FAILED) {
- ret = AVERROR(errno);
- av_log(avctx, AV_LOG_ERROR, "Error in mmap(): %s\n", strerror(errno));
- goto fail;
- }
-
- st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
- st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
- st->codec->width = fbdev->width;
- st->codec->height = fbdev->height;
- st->codec->pix_fmt = pix_fmt;
- st->codec->time_base = av_inv_q(fbdev->framerate_q);
- st->codec->bit_rate =
- fbdev->width * fbdev->height * fbdev->bytes_per_pixel * av_q2d(fbdev->framerate_q) * 8;
-
- av_log(avctx, AV_LOG_INFO,
- "w:%d h:%d bpp:%d pixfmt:%s fps:%d/%d bit_rate:%d\n",
- fbdev->width, fbdev->height, fbdev->varinfo.bits_per_pixel,
- av_get_pix_fmt_name(pix_fmt),
- fbdev->framerate_q.num, fbdev->framerate_q.den,
- st->codec->bit_rate);
- return 0;
-
-fail:
- close(fbdev->fd);
- return ret;
-}
-
-static int fbdev_read_packet(AVFormatContext *avctx, AVPacket *pkt)
-{
- FBDevContext *fbdev = avctx->priv_data;
- int64_t curtime, delay;
- struct timespec ts;
- int i, ret;
- uint8_t *pin, *pout;
-
- if (fbdev->time_frame == AV_NOPTS_VALUE)
- fbdev->time_frame = av_gettime();
-
- /* wait based on the frame rate */
- while (1) {
- curtime = av_gettime();
- delay = fbdev->time_frame - curtime;
- av_dlog(avctx,
- "time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n",
- fbdev->time_frame, curtime, delay);
- if (delay <= 0) {
- fbdev->time_frame += INT64_C(1000000) / av_q2d(fbdev->framerate_q);
- break;
- }
- if (avctx->flags & AVFMT_FLAG_NONBLOCK)
- return AVERROR(EAGAIN);
- ts.tv_sec = delay / 1000000;
- ts.tv_nsec = (delay % 1000000) * 1000;
- while (nanosleep(&ts, &ts) < 0 && errno == EINTR);
- }
-
- if ((ret = av_new_packet(pkt, fbdev->frame_size)) < 0)
- return ret;
-
- /* refresh fbdev->varinfo, visible data position may change at each call */
- if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0)
- av_log(avctx, AV_LOG_WARNING,
- "Error refreshing variable info: %s\n", strerror(errno));
-
- pkt->pts = curtime;
-
- /* compute visible data offset */
- pin = fbdev->data + fbdev->bytes_per_pixel * fbdev->varinfo.xoffset +
- fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;
- pout = pkt->data;
-
- for (i = 0; i < fbdev->height; i++) {
- memcpy(pout, pin, fbdev->frame_linesize);
- pin += fbdev->fixinfo.line_length;
- pout += fbdev->frame_linesize;
- }
-
- return fbdev->frame_size;
-}
-
-static av_cold int fbdev_read_close(AVFormatContext *avctx)
-{
- FBDevContext *fbdev = avctx->priv_data;
-
- munmap(fbdev->data, fbdev->frame_size);
- close(fbdev->fd);
-
- return 0;
-}
-
-#define OFFSET(x) offsetof(FBDevContext, x)
-#define DEC AV_OPT_FLAG_DECODING_PARAM
-static const AVOption options[] = {
- { "framerate","", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "25"}, 0, 0, DEC },
- { NULL },
-};
-
-static const AVClass fbdev_class = {
- .class_name = "fbdev indev",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-AVInputFormat ff_fbdev_demuxer = {
- .name = "fbdev",
- .long_name = NULL_IF_CONFIG_SMALL("Linux framebuffer"),
- .priv_data_size = sizeof(FBDevContext),
- .read_header = fbdev_read_header,
- .read_packet = fbdev_read_packet,
- .read_close = fbdev_read_close,
- .flags = AVFMT_NOFILE,
- .priv_class = &fbdev_class,
-};
diff --git a/ffmpeg/libavdevice/jack_audio.c b/ffmpeg/libavdevice/jack_audio.c
index bd6a770..5ba6731 100644
--- a/ffmpeg/libavdevice/jack_audio.c
+++ b/ffmpeg/libavdevice/jack_audio.c
@@ -188,6 +188,10 @@ static int start_jack(AVFormatContext *context)
/* Create time filter */
self->timefilter = ff_timefilter_new (1.0 / self->sample_rate, self->buffer_size, 1.5);
+ if (!self->timefilter) {
+ jack_client_close(self->client);
+ return AVERROR(ENOMEM);
+ }
/* Create FIFO buffers */
self->filled_pkts = av_fifo_alloc(FIFO_PACKETS_NUM * sizeof(AVPacket));
diff --git a/ffmpeg/libavdevice/lavfi.c b/ffmpeg/libavdevice/lavfi.c
index 159832a..a177ad0 100644
--- a/ffmpeg/libavdevice/lavfi.c
+++ b/ffmpeg/libavdevice/lavfi.c
@@ -60,7 +60,7 @@ static int *create_all_formats(int n)
for (i = 0; i < n; i++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
- if (!(desc->flags & PIX_FMT_HWACCEL))
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
count++;
}
@@ -68,7 +68,7 @@ static int *create_all_formats(int n)
return NULL;
for (j = 0, i = 0; i < n; i++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
- if (!(desc->flags & PIX_FMT_HWACCEL))
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
fmts[j++] = i;
}
fmts[j] = -1;
@@ -141,7 +141,7 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
if (!(lavfi->graph = avfilter_graph_alloc()))
FAIL(AVERROR(ENOMEM));
- if ((ret = avfilter_graph_parse(lavfi->graph, lavfi->graph_str,
+ if ((ret = avfilter_graph_parse_ptr(lavfi->graph, lavfi->graph_str,
&input_links, &output_links, avctx)) < 0)
FAIL(ret);
@@ -227,14 +227,11 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
}
if (type == AVMEDIA_TYPE_VIDEO) {
- AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
-
- buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&sink, buffersink,
inout->name, NULL,
- buffersink_params, lavfi->graph);
- av_freep(&buffersink_params);
-
+ NULL, lavfi->graph);
+ if (ret >= 0)
+ ret = av_opt_set_int_list(sink, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
goto end;
} else if (type == AVMEDIA_TYPE_AUDIO) {
@@ -243,13 +240,16 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_DBL, -1 };
- AVABufferSinkParams *abuffersink_params = av_abuffersink_params_alloc();
- abuffersink_params->sample_fmts = sample_fmts;
ret = avfilter_graph_create_filter(&sink, abuffersink,
inout->name, NULL,
- abuffersink_params, lavfi->graph);
- av_free(abuffersink_params);
+ NULL, lavfi->graph);
+ if (ret >= 0)
+ ret = av_opt_set_int_list(sink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0)
+ goto end;
+ ret = av_opt_set_int(sink, "all_channel_counts", 1,
+ AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
goto end;
}
@@ -290,7 +290,7 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
30);
} else if (link->type == AVMEDIA_TYPE_AUDIO) {
st->codec->codec_id = av_get_pcm_codec(link->format, -1);
- st->codec->channels = av_get_channel_layout_nb_channels(link->channel_layout);
+ st->codec->channels = avfilter_link_get_channels(link);
st->codec->sample_fmt = link->format;
st->codec->sample_rate = link->sample_rate;
st->codec->time_base = link->time_base;
diff --git a/ffmpeg/libavdevice/libavdevice.pc b/ffmpeg/libavdevice/libavdevice.pc
index ae1f257..30f2ff7 100644
--- a/ffmpeg/libavdevice/libavdevice.pc
+++ b/ffmpeg/libavdevice/libavdevice.pc
@@ -5,10 +5,10 @@ includedir=${prefix}/include
Name: libavdevice
Description: FFmpeg device handling library
-Version: 55.0.100
+Version: 55.5.102
Requires:
-Requires.private: libavfilter = 3.48.100, libavformat = 55.0.100
+Requires.private: libavfilter = 4.0.103, libavformat = 55.22.100
Conflicts:
-Libs: -L${libdir} -lavdevice
-Libs.private: -ldl -lXfixes -lXext -lX11 -ljack -lasound -lxvidcore -lx264 -lvorbisenc -lvorbis -logg -ltheoraenc -ltheoradec -logg -lschroedinger-1.0 -lmp3lame -lfaac -lm -pthread -lz -lrt
+Libs: -L${libdir} -lavdevice
+Libs.private: -lXfixes -lXext -lX11 -lx264 -lmp3lame -lm -lz -pthread
Cflags: -I${includedir}
diff --git a/ffmpeg/libavdevice/libcdio.c b/ffmpeg/libavdevice/libcdio.c
index a824bc3..91052cc 100644
--- a/ffmpeg/libavdevice/libcdio.c
+++ b/ffmpeg/libavdevice/libcdio.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2011 Anton Khirnov <anton@khirnov.net>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/ffmpeg/libavdevice/oss_audio.c b/ffmpeg/libavdevice/oss_audio.c
index aa40034..916908c 100644
--- a/ffmpeg/libavdevice/oss_audio.c
+++ b/ffmpeg/libavdevice/oss_audio.c
@@ -34,6 +34,7 @@
#include <fcntl.h>
#include <sys/ioctl.h>
+#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
@@ -63,9 +64,9 @@ static int audio_open(AVFormatContext *s1, int is_output, const char *audio_devi
char *flip = getenv("AUDIO_FLIP_LEFT");
if (is_output)
- audio_fd = open(audio_device, O_WRONLY);
+ audio_fd = avpriv_open(audio_device, O_WRONLY);
else
- audio_fd = open(audio_device, O_RDONLY);
+ audio_fd = avpriv_open(audio_device, O_RDONLY);
if (audio_fd < 0) {
av_log(s1, AV_LOG_ERROR, "%s: %s\n", audio_device, strerror(errno));
return AVERROR(EIO);
diff --git a/ffmpeg/libavdevice/pulse.c b/ffmpeg/libavdevice/pulse.c
deleted file mode 100644
index 86fdc22..0000000
--- a/ffmpeg/libavdevice/pulse.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Pulseaudio input
- * Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * PulseAudio input using the simple API.
- * @author Luca Barbato <lu_zero@gentoo.org>
- */
-
-#include <pulse/simple.h>
-#include <pulse/rtclock.h>
-#include <pulse/error.h>
-
-#include "libavformat/avformat.h"
-#include "libavformat/internal.h"
-#include "libavutil/opt.h"
-
-#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
-
-typedef struct PulseData {
- AVClass *class;
- char *server;
- char *name;
- char *stream_name;
- int sample_rate;
- int channels;
- int frame_size;
- int fragment_size;
- pa_simple *s;
- int64_t pts;
- int64_t frame_duration;
-} PulseData;
-
-static pa_sample_format_t codec_id_to_pulse_format(int codec_id) {
- switch (codec_id) {
- case AV_CODEC_ID_PCM_U8: return PA_SAMPLE_U8;
- case AV_CODEC_ID_PCM_ALAW: return PA_SAMPLE_ALAW;
- case AV_CODEC_ID_PCM_MULAW: return PA_SAMPLE_ULAW;
- case AV_CODEC_ID_PCM_S16LE: return PA_SAMPLE_S16LE;
- case AV_CODEC_ID_PCM_S16BE: return PA_SAMPLE_S16BE;
- case AV_CODEC_ID_PCM_F32LE: return PA_SAMPLE_FLOAT32LE;
- case AV_CODEC_ID_PCM_F32BE: return PA_SAMPLE_FLOAT32BE;
- case AV_CODEC_ID_PCM_S32LE: return PA_SAMPLE_S32LE;
- case AV_CODEC_ID_PCM_S32BE: return PA_SAMPLE_S32BE;
- case AV_CODEC_ID_PCM_S24LE: return PA_SAMPLE_S24LE;
- case AV_CODEC_ID_PCM_S24BE: return PA_SAMPLE_S24BE;
- default: return PA_SAMPLE_INVALID;
- }
-}
-
-static av_cold int pulse_read_header(AVFormatContext *s)
-{
- PulseData *pd = s->priv_data;
- AVStream *st;
- char *device = NULL;
- int ret;
- enum AVCodecID codec_id =
- s->audio_codec_id == AV_CODEC_ID_NONE ? DEFAULT_CODEC_ID : s->audio_codec_id;
- const pa_sample_spec ss = { codec_id_to_pulse_format(codec_id),
- pd->sample_rate,
- pd->channels };
-
- pa_buffer_attr attr = { -1 };
-
- st = avformat_new_stream(s, NULL);
-
- if (!st) {
- av_log(s, AV_LOG_ERROR, "Cannot add stream\n");
- return AVERROR(ENOMEM);
- }
-
- attr.fragsize = pd->fragment_size;
-
- if (strcmp(s->filename, "default"))
- device = s->filename;
-
- pd->s = pa_simple_new(pd->server, pd->name,
- PA_STREAM_RECORD,
- device, pd->stream_name, &ss,
- NULL, &attr, &ret);
-
- if (!pd->s) {
- av_log(s, AV_LOG_ERROR, "pa_simple_new failed: %s\n",
- pa_strerror(ret));
- return AVERROR(EIO);
- }
- /* take real parameters */
- st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
- st->codec->codec_id = codec_id;
- st->codec->sample_rate = pd->sample_rate;
- st->codec->channels = pd->channels;
- avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
-
- pd->pts = AV_NOPTS_VALUE;
- pd->frame_duration = (pd->frame_size * 1000000LL * 8) /
- (pd->sample_rate * pd->channels * av_get_bits_per_sample(codec_id));
-
- return 0;
-}
-
-static int pulse_read_packet(AVFormatContext *s, AVPacket *pkt)
-{
- PulseData *pd = s->priv_data;
- int res;
- pa_usec_t latency;
-
- if (av_new_packet(pkt, pd->frame_size) < 0) {
- return AVERROR(ENOMEM);
- }
-
- if ((pa_simple_read(pd->s, pkt->data, pkt->size, &res)) < 0) {
- av_log(s, AV_LOG_ERROR, "pa_simple_read failed: %s\n",
- pa_strerror(res));
- av_free_packet(pkt);
- return AVERROR(EIO);
- }
-
- if ((latency = pa_simple_get_latency(pd->s, &res)) == (pa_usec_t) -1) {
- av_log(s, AV_LOG_ERROR, "pa_simple_get_latency() failed: %s\n",
- pa_strerror(res));
- return AVERROR(EIO);
- }
-
- if (pd->pts == AV_NOPTS_VALUE) {
- pd->pts = -latency;
- }
-
- pkt->pts = pd->pts;
-
- pd->pts += pd->frame_duration;
-
- return 0;
-}
-
-static av_cold int pulse_close(AVFormatContext *s)
-{
- PulseData *pd = s->priv_data;
- pa_simple_free(pd->s);
- return 0;
-}
-
-#define OFFSET(a) offsetof(PulseData, a)
-#define D AV_OPT_FLAG_DECODING_PARAM
-
-static const AVOption options[] = {
- { "server", "pulse server name", OFFSET(server), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, D },
- { "name", "application name", OFFSET(name), AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0, D },
- { "stream_name", "stream description", OFFSET(stream_name), AV_OPT_TYPE_STRING, {.str = "record"}, 0, 0, D },
- { "sample_rate", "sample rate in Hz", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, D },
- { "channels", "number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, D },
- { "frame_size", "number of bytes per frame", OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, D },
- { "fragment_size", "buffering size, affects latency and cpu usage", OFFSET(fragment_size), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D },
- { NULL },
-};
-
-static const AVClass pulse_demuxer_class = {
- .class_name = "Pulse demuxer",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-AVInputFormat ff_pulse_demuxer = {
- .name = "pulse",
- .long_name = NULL_IF_CONFIG_SMALL("Pulse audio input"),
- .priv_data_size = sizeof(PulseData),
- .read_header = pulse_read_header,
- .read_packet = pulse_read_packet,
- .read_close = pulse_close,
- .flags = AVFMT_NOFILE,
- .priv_class = &pulse_demuxer_class,
-};
diff --git a/ffmpeg/libavdevice/sdl.c b/ffmpeg/libavdevice/sdl.c
index e708dfd..72d327e 100644
--- a/ffmpeg/libavdevice/sdl.c
+++ b/ffmpeg/libavdevice/sdl.c
@@ -24,10 +24,13 @@
*/
#include <SDL.h>
+#include <SDL_thread.h>
+
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/time.h"
#include "avdevice.h"
typedef struct {
@@ -38,10 +41,17 @@ typedef struct {
char *icon_title;
int window_width, window_height; /**< size of the window */
int window_fullscreen;
- int overlay_width, overlay_height; /**< size of the video in the window */
- int overlay_x, overlay_y;
+
+ SDL_Rect overlay_rect;
int overlay_fmt;
+
int sdl_was_already_inited;
+ SDL_Thread *event_thread;
+ SDL_mutex *mutex;
+ SDL_cond *init_cond;
+ int init_ret; /* return code used to signal initialization errors */
+ int inited;
+ int quit;
} SDLContext;
static const struct sdl_overlay_pix_fmt_entry {
@@ -57,27 +67,170 @@ static int sdl_write_trailer(AVFormatContext *s)
{
SDLContext *sdl = s->priv_data;
- av_freep(&sdl->window_title);
- av_freep(&sdl->icon_title);
+ sdl->quit = 1;
- if (sdl->overlay) {
+ if (sdl->overlay)
SDL_FreeYUVOverlay(sdl->overlay);
- sdl->overlay = NULL;
- }
+ if (sdl->event_thread)
+ SDL_WaitThread(sdl->event_thread, NULL);
+ if (sdl->mutex)
+ SDL_DestroyMutex(sdl->mutex);
+ if (sdl->init_cond)
+ SDL_DestroyCond(sdl->init_cond);
+
if (!sdl->sdl_was_already_inited)
SDL_Quit();
return 0;
}
+static void compute_overlay_rect(AVFormatContext *s)
+{
+ AVRational sar, dar; /* sample and display aspect ratios */
+ SDLContext *sdl = s->priv_data;
+ AVStream *st = s->streams[0];
+ AVCodecContext *encctx = st->codec;
+ SDL_Rect *overlay_rect = &sdl->overlay_rect;
+
+ /* compute overlay width and height from the codec context information */
+ sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
+ dar = av_mul_q(sar, (AVRational){ encctx->width, encctx->height });
+
+ /* we suppose the screen has a 1/1 sample aspect ratio */
+ if (sdl->window_width && sdl->window_height) {
+ /* fit in the window */
+ if (av_cmp_q(dar, (AVRational){ sdl->window_width, sdl->window_height }) > 0) {
+ /* fit in width */
+ overlay_rect->w = sdl->window_width;
+ overlay_rect->h = av_rescale(overlay_rect->w, dar.den, dar.num);
+ } else {
+ /* fit in height */
+ overlay_rect->h = sdl->window_height;
+ overlay_rect->w = av_rescale(overlay_rect->h, dar.num, dar.den);
+ }
+ } else {
+ if (sar.num > sar.den) {
+ overlay_rect->w = encctx->width;
+ overlay_rect->h = av_rescale(overlay_rect->w, dar.den, dar.num);
+ } else {
+ overlay_rect->h = encctx->height;
+ overlay_rect->w = av_rescale(overlay_rect->h, dar.num, dar.den);
+ }
+ sdl->window_width = overlay_rect->w;
+ sdl->window_height = overlay_rect->h;
+ }
+
+ overlay_rect->x = (sdl->window_width - overlay_rect->w) / 2;
+ overlay_rect->y = (sdl->window_height - overlay_rect->h) / 2;
+}
+
+#define SDL_BASE_FLAGS (SDL_SWSURFACE|SDL_RESIZABLE)
+
+static int event_thread(void *arg)
+{
+ AVFormatContext *s = arg;
+ SDLContext *sdl = s->priv_data;
+ int flags = SDL_BASE_FLAGS | (sdl->window_fullscreen ? SDL_FULLSCREEN : 0);
+ AVStream *st = s->streams[0];
+ AVCodecContext *encctx = st->codec;
+
+ /* initialization */
+ if (SDL_Init(SDL_INIT_VIDEO) != 0) {
+ av_log(s, AV_LOG_ERROR, "Unable to initialize SDL: %s\n", SDL_GetError());
+ sdl->init_ret = AVERROR(EINVAL);
+ goto init_end;
+ }
+
+ SDL_WM_SetCaption(sdl->window_title, sdl->icon_title);
+ sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height,
+ 24, flags);
+ if (!sdl->surface) {
+ av_log(sdl, AV_LOG_ERROR, "Unable to set video mode: %s\n", SDL_GetError());
+ sdl->init_ret = AVERROR(EINVAL);
+ goto init_end;
+ }
+
+ sdl->overlay = SDL_CreateYUVOverlay(encctx->width, encctx->height,
+ sdl->overlay_fmt, sdl->surface);
+ if (!sdl->overlay || sdl->overlay->pitches[0] < encctx->width) {
+ av_log(s, AV_LOG_ERROR,
+ "SDL does not support an overlay with size of %dx%d pixels\n",
+ encctx->width, encctx->height);
+ sdl->init_ret = AVERROR(EINVAL);
+ goto init_end;
+ }
+
+ sdl->init_ret = 0;
+ av_log(s, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d\n",
+ encctx->width, encctx->height, av_get_pix_fmt_name(encctx->pix_fmt),
+ sdl->overlay_rect.w, sdl->overlay_rect.h);
+
+init_end:
+ SDL_LockMutex(sdl->mutex);
+ sdl->inited = 1;
+ SDL_UnlockMutex(sdl->mutex);
+ SDL_CondSignal(sdl->init_cond);
+
+ if (sdl->init_ret < 0)
+ return sdl->init_ret;
+
+ /* event loop */
+ while (!sdl->quit) {
+ int ret;
+ SDL_Event event;
+ SDL_PumpEvents();
+ ret = SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_ALLEVENTS);
+ if (ret < 0) {
+ av_log(s, AV_LOG_ERROR, "Error when getting SDL event: %s\n", SDL_GetError());
+ continue;
+ }
+ if (ret == 0) {
+ SDL_Delay(10);
+ continue;
+ }
+
+ switch (event.type) {
+ case SDL_KEYDOWN:
+ switch (event.key.keysym.sym) {
+ case SDLK_ESCAPE:
+ case SDLK_q:
+ sdl->quit = 1;
+ break;
+ }
+ break;
+ case SDL_QUIT:
+ sdl->quit = 1;
+ break;
+
+ case SDL_VIDEORESIZE:
+ sdl->window_width = event.resize.w;
+ sdl->window_height = event.resize.h;
+
+ SDL_LockMutex(sdl->mutex);
+ sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height, 24, SDL_BASE_FLAGS);
+ if (!sdl->surface) {
+ av_log(s, AV_LOG_ERROR, "Failed to set SDL video mode: %s\n", SDL_GetError());
+ sdl->quit = 1;
+ } else {
+ compute_overlay_rect(s);
+ }
+ SDL_UnlockMutex(sdl->mutex);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int sdl_write_header(AVFormatContext *s)
{
SDLContext *sdl = s->priv_data;
AVStream *st = s->streams[0];
AVCodecContext *encctx = st->codec;
- AVRational sar, dar; /* sample and display aspect ratios */
int i, ret;
- int flags = SDL_SWSURFACE | sdl->window_fullscreen ? SDL_FULLSCREEN : 0;
if (!sdl->window_title)
sdl->window_title = av_strdup(s->filename);
@@ -92,12 +245,6 @@ static int sdl_write_header(AVFormatContext *s)
goto fail;
}
- if (SDL_Init(SDL_INIT_VIDEO) != 0) {
- av_log(s, AV_LOG_ERROR, "Unable to initialize SDL: %s\n", SDL_GetError());
- ret = AVERROR(EINVAL);
- goto fail;
- }
-
if ( s->nb_streams > 1
|| encctx->codec_type != AVMEDIA_TYPE_VIDEO
|| encctx->codec_id != AV_CODEC_ID_RAWVIDEO) {
@@ -122,57 +269,37 @@ static int sdl_write_header(AVFormatContext *s)
}
/* compute overlay width and height from the codec context information */
- sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
- dar = av_mul_q(sar, (AVRational){ encctx->width, encctx->height });
+ compute_overlay_rect(s);
- /* we suppose the screen has a 1/1 sample aspect ratio */
- if (sdl->window_width && sdl->window_height) {
- /* fit in the window */
- if (av_cmp_q(dar, (AVRational){ sdl->window_width, sdl->window_height }) > 0) {
- /* fit in width */
- sdl->overlay_width = sdl->window_width;
- sdl->overlay_height = av_rescale(sdl->overlay_width, dar.den, dar.num);
- } else {
- /* fit in height */
- sdl->overlay_height = sdl->window_height;
- sdl->overlay_width = av_rescale(sdl->overlay_height, dar.num, dar.den);
- }
- } else {
- if (sar.num > sar.den) {
- sdl->overlay_width = encctx->width;
- sdl->overlay_height = av_rescale(sdl->overlay_width, dar.den, dar.num);
- } else {
- sdl->overlay_height = encctx->height;
- sdl->overlay_width = av_rescale(sdl->overlay_height, dar.num, dar.den);
- }
- sdl->window_width = sdl->overlay_width;
- sdl->window_height = sdl->overlay_height;
+ sdl->init_cond = SDL_CreateCond();
+ if (!sdl->init_cond) {
+ av_log(s, AV_LOG_ERROR, "Could not create SDL condition variable: %s\n", SDL_GetError());
+ ret = AVERROR_EXTERNAL;
+ goto fail;
}
- sdl->overlay_x = (sdl->window_width - sdl->overlay_width ) / 2;
- sdl->overlay_y = (sdl->window_height - sdl->overlay_height) / 2;
-
- SDL_WM_SetCaption(sdl->window_title, sdl->icon_title);
- sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height,
- 24, flags);
- if (!sdl->surface) {
- av_log(s, AV_LOG_ERROR, "Unable to set video mode: %s\n", SDL_GetError());
- ret = AVERROR(EINVAL);
+ sdl->mutex = SDL_CreateMutex();
+ if (!sdl->mutex) {
+ av_log(s, AV_LOG_ERROR, "Could not create SDL mutex: %s\n", SDL_GetError());
+ ret = AVERROR_EXTERNAL;
goto fail;
}
-
- sdl->overlay = SDL_CreateYUVOverlay(encctx->width, encctx->height,
- sdl->overlay_fmt, sdl->surface);
- if (!sdl->overlay || sdl->overlay->pitches[0] < encctx->width) {
- av_log(s, AV_LOG_ERROR,
- "SDL does not support an overlay with size of %dx%d pixels\n",
- encctx->width, encctx->height);
- ret = AVERROR(EINVAL);
+ sdl->event_thread = SDL_CreateThread(event_thread, s);
+ if (!sdl->event_thread) {
+ av_log(s, AV_LOG_ERROR, "Could not create SDL event thread: %s\n", SDL_GetError());
+ ret = AVERROR_EXTERNAL;
goto fail;
}
- av_log(s, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d\n",
- encctx->width, encctx->height, av_get_pix_fmt_name(encctx->pix_fmt), sar.num, sar.den,
- sdl->overlay_width, sdl->overlay_height);
+ /* wait until the video system has been inited */
+ SDL_LockMutex(sdl->mutex);
+ if (!sdl->inited) {
+ SDL_CondWait(sdl->init_cond, sdl->mutex);
+ }
+ SDL_UnlockMutex(sdl->mutex);
+ if (sdl->init_ret < 0) {
+ ret = sdl->init_ret;
+ goto fail;
+ }
return 0;
fail:
@@ -184,12 +311,16 @@ static int sdl_write_packet(AVFormatContext *s, AVPacket *pkt)
{
SDLContext *sdl = s->priv_data;
AVCodecContext *encctx = s->streams[0]->codec;
- SDL_Rect rect = { sdl->overlay_x, sdl->overlay_y, sdl->overlay_width, sdl->overlay_height };
AVPicture pict;
int i;
+ if (sdl->quit) {
+ sdl_write_trailer(s);
+ return AVERROR(EIO);
+ }
avpicture_fill(&pict, pkt->data, encctx->pix_fmt, encctx->width, encctx->height);
+ SDL_LockMutex(sdl->mutex);
SDL_FillRect(sdl->surface, &sdl->surface->clip_rect,
SDL_MapRGB(sdl->surface->format, 0, 0, 0));
SDL_LockYUVOverlay(sdl->overlay);
@@ -197,10 +328,13 @@ static int sdl_write_packet(AVFormatContext *s, AVPacket *pkt)
sdl->overlay->pixels [i] = pict.data [i];
sdl->overlay->pitches[i] = pict.linesize[i];
}
- SDL_DisplayYUVOverlay(sdl->overlay, &rect);
+ SDL_DisplayYUVOverlay(sdl->overlay, &sdl->overlay_rect);
SDL_UnlockYUVOverlay(sdl->overlay);
- SDL_UpdateRect(sdl->surface, rect.x, rect.y, rect.w, rect.h);
+ SDL_UpdateRect(sdl->surface,
+ sdl->overlay_rect.x, sdl->overlay_rect.y,
+ sdl->overlay_rect.w, sdl->overlay_rect.h);
+ SDL_UnlockMutex(sdl->mutex);
return 0;
}
@@ -208,10 +342,10 @@ static int sdl_write_packet(AVFormatContext *s, AVPacket *pkt)
#define OFFSET(x) offsetof(SDLContext,x)
static const AVOption options[] = {
- { "window_title", "set SDL window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
- { "icon_title", "set SDL iconified window title", OFFSET(icon_title) , AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
- { "window_size", "set SDL window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE,{.str=NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
- { "window_fullscreen", "set SDL window fullscreen", OFFSET(window_fullscreen), AV_OPT_TYPE_INT,{.i64=0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_title", "set SDL window title", OFFSET(window_title), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "icon_title", "set SDL iconified window title", OFFSET(icon_title) , AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_size", "set SDL window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
+ { "window_fullscreen", "set SDL window fullscreen", OFFSET(window_fullscreen), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
{ NULL },
};
diff --git a/ffmpeg/libavdevice/timefilter.c b/ffmpeg/libavdevice/timefilter.c
index ba2e54e..9d38f93 100644
--- a/ffmpeg/libavdevice/timefilter.c
+++ b/ffmpeg/libavdevice/timefilter.c
@@ -49,6 +49,10 @@ TimeFilter *ff_timefilter_new(double time_base,
{
TimeFilter *self = av_mallocz(sizeof(TimeFilter));
double o = 2 * M_PI * bandwidth * period * time_base;
+
+ if (!self)
+ return NULL;
+
self->clock_period = time_base;
self->feedback2_factor = qexpneg(M_SQRT2 * o);
self->feedback3_factor = qexpneg(o * o) / period;
@@ -101,7 +105,7 @@ int main(void)
for (n0 = 0; n0 < 40; n0 = 2 * n0 + 1) {
for (n1 = 0; n1 < 10; n1 = 2 * n1 + 1) {
double best_error = 1000000000;
- double bestpar0 = 1;
+ double bestpar0 = n0 ? 1 : 100000;
double bestpar1 = 1;
int better, i;
@@ -121,6 +125,10 @@ int main(void)
for (par1 = bestpar1 * 0.8; par1 <= bestpar1 * 1.21; par1 += bestpar1 * 0.05) {
double error = 0;
TimeFilter *tf = ff_timefilter_new(1, par0, par1);
+ if (!tf) {
+ printf("Could not allocate memory for timefilter.\n");
+ exit(1);
+ }
for (i = 0; i < SAMPLES; i++) {
double filtered;
filtered = ff_timefilter_update(tf, samples[i], i ? (samplet[i] - samplet[i-1]) : 1);
@@ -150,7 +158,7 @@ int main(void)
}
ff_timefilter_destroy(tf);
#else
- printf(" [%f %f %9f]", bestpar0, bestpar1, best_error);
+ printf(" [%12f %11f %9f]", bestpar0, bestpar1, best_error);
#endif
}
printf("\n");
diff --git a/ffmpeg/libavdevice/timefilter.h b/ffmpeg/libavdevice/timefilter.h
index 6662959..cb3d0a7 100644
--- a/ffmpeg/libavdevice/timefilter.h
+++ b/ffmpeg/libavdevice/timefilter.h
@@ -58,6 +58,8 @@ typedef struct TimeFilter TimeFilter;
* @param period expected update interval, in input units
* @param brandwidth filtering bandwidth, in Hz
*
+ * @return a pointer to a TimeFilter struct, or NULL on error
+ *
* For more details about these parameters and background concepts please see:
* http://www.kokkinizita.net/papers/usingdll.pdf
*/
diff --git a/ffmpeg/libavdevice/v4l.c b/ffmpeg/libavdevice/v4l.c
index e2f37d6..bf2c9e3 100644
--- a/ffmpeg/libavdevice/v4l.c
+++ b/ffmpeg/libavdevice/v4l.c
@@ -37,7 +37,6 @@
#define _LINUX_TIME_H 1
#include <linux/videodev.h>
#include <time.h>
-#include "avdevice.h"
typedef struct {
AVClass *class;
diff --git a/ffmpeg/libavdevice/v4l2.c b/ffmpeg/libavdevice/v4l2.c
index 2d7773a..cb962b7 100644
--- a/ffmpeg/libavdevice/v4l2.c
+++ b/ffmpeg/libavdevice/v4l2.c
@@ -30,44 +30,10 @@
* V4L2_PIX_FMT_* and AV_PIX_FMT_*
*/
-#undef __STRICT_ANSI__ //workaround due to broken kernel headers
-#include "config.h"
-#include "libavformat/internal.h"
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#if HAVE_SYS_VIDEOIO_H
-#include <sys/videoio.h>
-#else
-#if HAVE_ASM_TYPES_H
-#include <asm/types.h>
-#endif
-#include <linux/videodev2.h>
-#endif
-#include "libavutil/atomic.h"
-#include "libavutil/avassert.h"
-#include "libavutil/imgutils.h"
-#include "libavutil/log.h"
-#include "libavutil/opt.h"
-#include "avdevice.h"
-#include "timefilter.h"
-#include "libavutil/parseutils.h"
-#include "libavutil/pixdesc.h"
-#include "libavutil/time.h"
-#include "libavutil/avstring.h"
+#include "v4l2-common.h"
#if CONFIG_LIBV4L2
#include <libv4l2.h>
-#else
-#define v4l2_open open
-#define v4l2_close close
-#define v4l2_dup dup
-#define v4l2_ioctl ioctl
-#define v4l2_read read
-#define v4l2_mmap mmap
-#define v4l2_munmap munmap
#endif
static const int desired_video_buffers = 256;
@@ -121,6 +87,15 @@ struct video_data {
int list_format; /**< Set by a private option. */
int list_standard; /**< Set by a private option. */
char *framerate; /**< Set by a private option. */
+
+ int use_libv4l2;
+ int (*open_f)(const char *file, int oflag, ...);
+ int (*close_f)(int fd);
+ int (*dup_f)(int fd);
+ int (*ioctl_f)(int fd, unsigned long int request, ...);
+ ssize_t (*read_f)(int fd, void *buffer, size_t n);
+ void *(*mmap_f)(void *start, size_t length, int prot, int flags, int fd, int64_t offset);
+ int (*munmap_f)(void *_start, size_t length);
};
struct buff_data {
@@ -128,52 +103,43 @@ struct buff_data {
int index;
};
-struct fmt_map {
- enum AVPixelFormat ff_fmt;
- enum AVCodecID codec_id;
- uint32_t v4l2_fmt;
-};
-
-static struct fmt_map fmt_conversion_table[] = {
- //ff_fmt codec_id v4l2_fmt
- { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV420 },
- { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU420 },
- { AV_PIX_FMT_YUV422P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV422P },
- { AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUYV },
- { AV_PIX_FMT_UYVY422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_UYVY },
- { AV_PIX_FMT_YUV411P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV411P },
- { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV410 },
- { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU410 },
- { AV_PIX_FMT_RGB555LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555 },
- { AV_PIX_FMT_RGB555BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555X },
- { AV_PIX_FMT_RGB565LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565 },
- { AV_PIX_FMT_RGB565BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565X },
- { AV_PIX_FMT_BGR24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR24 },
- { AV_PIX_FMT_RGB24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB24 },
- { AV_PIX_FMT_BGR0, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR32 },
- { AV_PIX_FMT_0RGB, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB32 },
- { AV_PIX_FMT_GRAY8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_GREY },
-#ifdef V4L2_PIX_FMT_Y16
- { AV_PIX_FMT_GRAY16LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_Y16 },
-#endif
- { AV_PIX_FMT_NV12, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV12 },
- { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_MJPEG },
- { AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_JPEG },
-#ifdef V4L2_PIX_FMT_H264
- { AV_PIX_FMT_NONE, AV_CODEC_ID_H264, V4L2_PIX_FMT_H264 },
-#endif
-#ifdef V4L2_PIX_FMT_CPIA1
- { AV_PIX_FMT_NONE, AV_CODEC_ID_CPIA, V4L2_PIX_FMT_CPIA1 },
-#endif
-};
-
static int device_open(AVFormatContext *ctx)
{
+ struct video_data *s = ctx->priv_data;
struct v4l2_capability cap;
int fd;
int ret;
int flags = O_RDWR;
+#define SET_WRAPPERS(prefix) do { \
+ s->open_f = prefix ## open; \
+ s->close_f = prefix ## close; \
+ s->dup_f = prefix ## dup; \
+ s->ioctl_f = prefix ## ioctl; \
+ s->read_f = prefix ## read; \
+ s->mmap_f = prefix ## mmap; \
+ s->munmap_f = prefix ## munmap; \
+} while (0)
+
+ if (s->use_libv4l2) {
+#if CONFIG_LIBV4L2
+ SET_WRAPPERS(v4l2_);
+#else
+        av_log(ctx, AV_LOG_ERROR, "libavdevice is not built with libv4l2 support.\n");
+ return AVERROR(EINVAL);
+#endif
+ } else {
+ SET_WRAPPERS();
+ }
+
+#define v4l2_open s->open_f
+#define v4l2_close s->close_f
+#define v4l2_dup s->dup_f
+#define v4l2_ioctl s->ioctl_f
+#define v4l2_read s->read_f
+#define v4l2_mmap s->mmap_f
+#define v4l2_munmap s->munmap_f
+
if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
flags |= O_NONBLOCK;
}
@@ -259,7 +225,7 @@ static int device_init(AVFormatContext *ctx, int *width, int *height,
return res;
}
-static int first_field(int fd)
+static int first_field(const struct video_data *s, int fd)
{
int res;
v4l2_std_id std;
@@ -275,55 +241,13 @@ static int first_field(int fd)
return 1;
}
-static uint32_t fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
- if ((codec_id == AV_CODEC_ID_NONE ||
- fmt_conversion_table[i].codec_id == codec_id) &&
- (pix_fmt == AV_PIX_FMT_NONE ||
- fmt_conversion_table[i].ff_fmt == pix_fmt)) {
- return fmt_conversion_table[i].v4l2_fmt;
- }
- }
-
- return 0;
-}
-
-static enum AVPixelFormat fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
- if (fmt_conversion_table[i].v4l2_fmt == v4l2_fmt &&
- fmt_conversion_table[i].codec_id == codec_id) {
- return fmt_conversion_table[i].ff_fmt;
- }
- }
-
- return AV_PIX_FMT_NONE;
-}
-
-static enum AVCodecID fmt_v4l2codec(uint32_t v4l2_fmt)
-{
- int i;
-
- for (i = 0; i < FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
- if (fmt_conversion_table[i].v4l2_fmt == v4l2_fmt) {
- return fmt_conversion_table[i].codec_id;
- }
- }
-
- return AV_CODEC_ID_NONE;
-}
-
#if HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE
static void list_framesizes(AVFormatContext *ctx, int fd, uint32_t pixelformat)
{
+ const struct video_data *s = ctx->priv_data;
struct v4l2_frmsizeenum vfse = { .pixel_format = pixelformat };
- while(!ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &vfse)) {
+ while(!v4l2_ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &vfse)) {
switch (vfse.type) {
case V4L2_FRMSIZE_TYPE_DISCRETE:
av_log(ctx, AV_LOG_INFO, " %ux%u",
@@ -346,11 +270,12 @@ static void list_framesizes(AVFormatContext *ctx, int fd, uint32_t pixelformat)
static void list_formats(AVFormatContext *ctx, int fd, int type)
{
+ const struct video_data *s = ctx->priv_data;
struct v4l2_fmtdesc vfd = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
- while(!ioctl(fd, VIDIOC_ENUM_FMT, &vfd)) {
- enum AVCodecID codec_id = fmt_v4l2codec(vfd.pixelformat);
- enum AVPixelFormat pix_fmt = fmt_v4l2ff(vfd.pixelformat, codec_id);
+ while(!v4l2_ioctl(fd, VIDIOC_ENUM_FMT, &vfd)) {
+ enum AVCodecID codec_id = avpriv_fmt_v4l2codec(vfd.pixelformat);
+ enum AVPixelFormat pix_fmt = avpriv_fmt_v4l2ff(vfd.pixelformat, codec_id);
vfd.index++;
@@ -371,10 +296,8 @@ static void list_formats(AVFormatContext *ctx, int fd, int type)
}
#ifdef V4L2_FMT_FLAG_EMULATED
- if (vfd.flags & V4L2_FMT_FLAG_EMULATED) {
- av_log(ctx, AV_LOG_WARNING, "%s", "Emulated");
- continue;
- }
+ if (vfd.flags & V4L2_FMT_FLAG_EMULATED)
+ av_log(ctx, AV_LOG_INFO, " Emulated :");
#endif
#if HAVE_STRUCT_V4L2_FRMIVALENUM_DISCRETE
list_framesizes(ctx, fd, vfd.pixelformat);
@@ -402,8 +325,8 @@ static void list_standards(AVFormatContext *ctx)
return;
}
}
- av_log(ctx, AV_LOG_INFO, "%2d, %16llx, %s\n",
- standard.index, standard.id, standard.name);
+ av_log(ctx, AV_LOG_INFO, "%2d, %16"PRIx64", %s\n",
+ standard.index, (uint64_t)standard.id, standard.name);
}
}
@@ -532,6 +455,8 @@ static int init_convert_timestamp(AVFormatContext *ctx, int64_t ts)
av_log(ctx, AV_LOG_INFO, "Detected monotonic timestamps, converting\n");
/* microseconds instead of seconds, MHz instead of Hz */
s->timefilter = ff_timefilter_new(1, period, 1.0E-6);
+ if (!s->timefilter)
+ return AVERROR(ENOMEM);
s->ts_mode = V4L_TS_CONVERT_READY;
return 0;
}
@@ -605,7 +530,7 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
/* Image is at s->buff_start[buf.index] */
if (avpriv_atomic_int_get(&s->buffers_queued) == FFMAX(s->buffers / 8, 1)) {
- /* when we start getting low on queued buffers, fallback to copying data */
+ /* when we start getting low on queued buffers, fall back on copying data */
res = av_new_packet(pkt, buf.bytesused);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "Error allocating a packet.\n");
@@ -628,7 +553,9 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
pkt->data = s->buf_start[buf.index];
pkt->size = buf.bytesused;
#if FF_API_DESTRUCT_PACKET
+FF_DISABLE_DEPRECATION_WARNINGS
pkt->destruct = dummy_release_buffer;
+FF_ENABLE_DEPRECATION_WARNINGS
#endif
buf_descriptor = av_malloc(sizeof(struct buff_data));
@@ -762,12 +689,16 @@ static int v4l2_set_parameters(AVFormatContext *s1)
standard.index = i;
if (v4l2_ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
ret = AVERROR(errno);
+ if (ret == AVERROR(EINVAL)) {
+ tpf = &streamparm.parm.capture.timeperframe;
+ break;
+ }
av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_ENUMSTD): %s\n", av_err2str(ret));
return ret;
}
if (standard.id == s->std_id) {
av_log(s1, AV_LOG_DEBUG,
- "Current standard: %s, id: %"PRIu64", frameperiod: %d/%d\n",
+ "Current standard: %s, id: %"PRIx64", frameperiod: %d/%d\n",
standard.name, (uint64_t)standard.id, tpf->numerator, tpf->denominator);
break;
}
@@ -827,7 +758,7 @@ static int device_try_init(AVFormatContext *s1,
{
int ret, i;
- *desired_format = fmt_ff2v4l(pix_fmt, s1->video_codec_id);
+ *desired_format = avpriv_fmt_ff2v4l(pix_fmt, s1->video_codec_id);
if (*desired_format) {
ret = device_init(s1, width, height, *desired_format);
@@ -839,14 +770,14 @@ static int device_try_init(AVFormatContext *s1,
}
if (!*desired_format) {
- for (i = 0; i<FF_ARRAY_ELEMS(fmt_conversion_table); i++) {
+ for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
if (s1->video_codec_id == AV_CODEC_ID_NONE ||
- fmt_conversion_table[i].codec_id == s1->video_codec_id) {
+ avpriv_fmt_conversion_table[i].codec_id == s1->video_codec_id) {
av_log(s1, AV_LOG_DEBUG, "Trying to set codec:%s pix_fmt:%s\n",
- avcodec_get_name(fmt_conversion_table[i].codec_id),
- (char *)av_x_if_null(av_get_pix_fmt_name(fmt_conversion_table[i].ff_fmt), "none"));
+ avcodec_get_name(avpriv_fmt_conversion_table[i].codec_id),
+ (char *)av_x_if_null(av_get_pix_fmt_name(avpriv_fmt_conversion_table[i].ff_fmt), "none"));
- *desired_format = fmt_conversion_table[i].v4l2_fmt;
+ *desired_format = avpriv_fmt_conversion_table[i].v4l2_fmt;
ret = device_init(s1, width, height, *desired_format);
if (ret >= 0)
break;
@@ -865,7 +796,7 @@ static int device_try_init(AVFormatContext *s1,
}
}
- *codec_id = fmt_v4l2codec(*desired_format);
+ *codec_id = avpriv_fmt_v4l2codec(*desired_format);
av_assert0(*codec_id != AV_CODEC_ID_NONE);
return ret;
}
@@ -887,21 +818,32 @@ static int v4l2_read_header(AVFormatContext *s1)
#if CONFIG_LIBV4L2
/* silence libv4l2 logging. if fopen() fails v4l2_log_file will be NULL
and errors will get sent to stderr */
- v4l2_log_file = fopen("/dev/null", "w");
+ if (s->use_libv4l2)
+ v4l2_log_file = fopen("/dev/null", "w");
#endif
s->fd = device_open(s1);
if (s->fd < 0)
return s->fd;
- /* set tv video input */
- av_log(s1, AV_LOG_DEBUG, "Selecting input_channel: %d\n", s->channel);
- if (v4l2_ioctl(s->fd, VIDIOC_S_INPUT, &s->channel) < 0) {
- res = AVERROR(errno);
- av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_INPUT): %s\n", av_err2str(res));
- return res;
+ if (s->channel != -1) {
+ /* set video input */
+ av_log(s1, AV_LOG_DEBUG, "Selecting input_channel: %d\n", s->channel);
+ if (v4l2_ioctl(s->fd, VIDIOC_S_INPUT, &s->channel) < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_INPUT): %s\n", av_err2str(res));
+ return res;
+ }
+ } else {
+ /* get current video input */
+ if (v4l2_ioctl(s->fd, VIDIOC_G_INPUT, &s->channel) < 0) {
+ res = AVERROR(errno);
+ av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_INPUT): %s\n", av_err2str(res));
+ return res;
+ }
}
+ /* enum input */
input.index = s->channel;
if (v4l2_ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
res = AVERROR(errno);
@@ -909,8 +851,8 @@ static int v4l2_read_header(AVFormatContext *s1)
return res;
}
s->std_id = input.std;
- av_log(s1, AV_LOG_DEBUG, "input_channel: %d, input_name: %s\n",
- s->channel, input.name);
+ av_log(s1, AV_LOG_DEBUG, "Current input_channel: %d, input_name: %s, input_std: %"PRIx64"\n",
+ s->channel, input.name, (uint64_t)input.std);
if (s->list_format) {
list_formats(s1, s->fd, s->list_format);
@@ -941,11 +883,10 @@ static int v4l2_read_header(AVFormatContext *s1)
}
if (!s->width && !s->height) {
- struct v4l2_format fmt;
+ struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
av_log(s1, AV_LOG_VERBOSE,
"Querying the device for the current frame size\n");
- fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (v4l2_ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
res = AVERROR(errno);
av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", av_err2str(res));
@@ -979,7 +920,7 @@ static int v4l2_read_header(AVFormatContext *s1)
if ((res = v4l2_set_parameters(s1)) < 0)
return res;
- st->codec->pix_fmt = fmt_v4l2ff(desired_format, codec_id);
+ st->codec->pix_fmt = avpriv_fmt_v4l2ff(desired_format, codec_id);
s->frame_size =
avpicture_get_size(st->codec->pix_fmt, s->width, s->height);
@@ -989,13 +930,16 @@ static int v4l2_read_header(AVFormatContext *s1)
return res;
}
- s->top_field_first = first_field(s->fd);
+ s->top_field_first = first_field(s, s->fd);
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = codec_id;
if (codec_id == AV_CODEC_ID_RAWVIDEO)
st->codec->codec_tag =
avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
+ else if (codec_id == AV_CODEC_ID_H264) {
+ st->need_parsing = AVSTREAM_PARSE_HEADERS;
+ }
if (desired_format == V4L2_PIX_FMT_YVU420)
st->codec->codec_tag = MKTAG('Y', 'V', '1', '2');
else if (desired_format == V4L2_PIX_FMT_YVU410)
@@ -1045,7 +989,7 @@ static int v4l2_read_close(AVFormatContext *s1)
static const AVOption options[] = {
{ "standard", "set TV standard, used only by analog frame grabber", OFFSET(standard), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC },
- { "channel", "set TV channel, used only by frame grabber", OFFSET(channel), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, DEC },
+ { "channel", "set TV channel, used only by frame grabber", OFFSET(channel), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, INT_MAX, DEC },
{ "video_size", "set frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
{ "pixel_format", "set preferred pixel format", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ "input_format", "set preferred pixel format (for raw video) or codec name", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
@@ -1064,7 +1008,7 @@ static const AVOption options[] = {
{ "default", "use timestamps from the kernel", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_DEFAULT }, 0, 2, DEC, "timestamps" },
{ "abs", "use absolute timestamps (wall clock)", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_ABS }, 0, 2, DEC, "timestamps" },
{ "mono2abs", "force conversion from monotonic to absolute timestamps", OFFSET(ts_mode), AV_OPT_TYPE_CONST, {.i64 = V4L_TS_MONO2ABS }, 0, 2, DEC, "timestamps" },
-
+    { "use_libv4l2", "use libv4l2 (v4l-utils) conversion functions", OFFSET(use_libv4l2), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
{ NULL },
};
diff --git a/ffmpeg/libavdevice/version.h b/ffmpeg/libavdevice/version.h
index c03e733..d569fae 100644
--- a/ffmpeg/libavdevice/version.h
+++ b/ffmpeg/libavdevice/version.h
@@ -25,11 +25,11 @@
* Libavdevice version macros
*/
-#include "libavutil/avutil.h"
+#include "libavutil/version.h"
#define LIBAVDEVICE_VERSION_MAJOR 55
-#define LIBAVDEVICE_VERSION_MINOR 0
-#define LIBAVDEVICE_VERSION_MICRO 100
+#define LIBAVDEVICE_VERSION_MINOR 5
+#define LIBAVDEVICE_VERSION_MICRO 102
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \
diff --git a/ffmpeg/libavdevice/vfwcap.c b/ffmpeg/libavdevice/vfwcap.c
index 66b02be..014f18c 100644
--- a/ffmpeg/libavdevice/vfwcap.c
+++ b/ffmpeg/libavdevice/vfwcap.c
@@ -155,7 +155,7 @@ static void dump_bih(AVFormatContext *s, BITMAPINFOHEADER *bih)
static int shall_we_drop(AVFormatContext *s)
{
struct vfw_ctx *ctx = s->priv_data;
- const uint8_t dropscore[] = {62, 75, 87, 100};
+ static const uint8_t dropscore[] = {62, 75, 87, 100};
const int ndropscores = FF_ARRAY_ELEMS(dropscore);
unsigned int buffer_fullness = (ctx->curbufsize*100)/s->max_picture_buffer;
diff --git a/ffmpeg/libavdevice/x11grab.c b/ffmpeg/libavdevice/x11grab.c
index 6124006..0e7b6ae 100644
--- a/ffmpeg/libavdevice/x11grab.c
+++ b/ffmpeg/libavdevice/x11grab.c
@@ -76,8 +76,11 @@ struct x11grab {
int draw_mouse; /**< Set by a private option. */
int follow_mouse; /**< Set by a private option. */
int show_region; /**< set by a private option. */
- char *framerate; /**< Set by a private option. */
+ AVRational framerate; /**< Set by a private option. */
+ int palette_changed;
+ uint32_t palette[256];
+ Cursor c;
Window region_win; /**< This is used by show_region option. */
};
@@ -166,7 +169,9 @@ x11grab_read_header(AVFormatContext *s1)
int use_shm;
char *dpyname, *offset;
int ret = 0;
- AVRational framerate;
+ Colormap color_map;
+ XColor color[256];
+ int i;
dpyname = av_strdup(s1->filename);
if (!dpyname)
@@ -184,10 +189,6 @@ x11grab_read_header(AVFormatContext *s1)
*offset= 0;
}
- if ((ret = av_parse_video_rate(&framerate, x11grab->framerate)) < 0) {
- av_log(s1, AV_LOG_ERROR, "Could not parse framerate: %s.\n", x11grab->framerate);
- goto out;
- }
av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n",
s1->filename, dpyname, x_off, y_off, x11grab->width, x11grab->height);
@@ -262,6 +263,15 @@ x11grab_read_header(AVFormatContext *s1)
case 8:
av_log (s1, AV_LOG_DEBUG, "8 bit palette\n");
input_pixfmt = AV_PIX_FMT_PAL8;
+ color_map = DefaultColormap(dpy, screen);
+ for (i = 0; i < 256; ++i)
+ color[i].pixel = i;
+ XQueryColors(dpy, color_map, color, 256);
+ for (i = 0; i < 256; ++i)
+ x11grab->palette[i] = (color[i].red & 0xFF00) << 8 |
+ (color[i].green & 0xFF00) |
+ (color[i].blue & 0xFF00) >> 8;
+ x11grab->palette_changed = 1;
break;
case 16:
if ( image->red_mask == 0xf800 &&
@@ -308,7 +318,7 @@ x11grab_read_header(AVFormatContext *s1)
x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel/8;
x11grab->dpy = dpy;
- x11grab->time_base = av_inv_q(framerate);
+ x11grab->time_base = av_inv_q(x11grab->framerate);
x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base);
x11grab->x_off = x_off;
x11grab->y_off = y_off;
@@ -353,7 +363,6 @@ paint_mouse_pointer(XImage *image, struct x11grab *s)
* Anyone who performs further investigation of the xlib API likely risks
* permanent brain damage. */
uint8_t *pix = image->data;
- Cursor c;
Window w;
XSetWindowAttributes attr;
@@ -361,9 +370,10 @@ paint_mouse_pointer(XImage *image, struct x11grab *s)
if (image->bits_per_pixel != 24 && image->bits_per_pixel != 32)
return;
- c = XCreateFontCursor(dpy, XC_left_ptr);
+ if(!s->c)
+ s->c = XCreateFontCursor(dpy, XC_left_ptr);
w = DefaultRootWindow(dpy);
- attr.cursor = c;
+ attr.cursor = s->c;
XChangeWindowAttributes(dpy, w, CWCursor, &attr);
xcim = XFixesGetCursorImage(dpy);
@@ -493,6 +503,16 @@ x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
pkt->data = image->data;
pkt->size = s->frame_size;
pkt->pts = curtime;
+ if (s->palette_changed) {
+ uint8_t *pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
+ AVPALETTE_SIZE);
+ if (!pal) {
+ av_log(s, AV_LOG_ERROR, "Cannot append palette to packet\n");
+ } else {
+ memcpy(pal, s->palette, AVPALETTE_SIZE);
+ s->palette_changed = 0;
+ }
+ }
screen = DefaultScreen(dpy);
root = RootWindow(dpy, screen);
@@ -602,7 +622,7 @@ static const AVOption options[] = {
{ "centered", "keep the mouse pointer at the center of grabbing region when following",
0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "follow_mouse" },
- { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
+ { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, 0, DEC },
{ "show_region", "show the grabbing region", OFFSET(show_region), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
{ "video_size", "set video frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"}, 0, 0, DEC },
{ NULL },