author    Tim Redfern <tim@eclectronics.org>  2013-12-29 12:19:38 +0000
committer Tim Redfern <tim@eclectronics.org>  2013-12-29 12:19:38 +0000
commit    f7813a5324be39d13ab536c245d15dfc602a7849 (patch)
tree      fad99148b88823d34a5df2f0a25881a002eb291b /ffmpeg/doc/examples
parent    b7a5a477b8ff4d4e3028b9dfb9a9df0a41463f92 (diff)
basic type mechanism working
Diffstat (limited to 'ffmpeg/doc/examples')
-rw-r--r--  ffmpeg/doc/examples/Makefile                        |   6
-rw-r--r--  ffmpeg/doc/examples/README                          |  13
-rw-r--r--  ffmpeg/doc/examples/decoding_encoding.c             |  29
-rw-r--r--  ffmpeg/doc/examples/demuxing.c                      | 342
-rw-r--r--  ffmpeg/doc/examples/filtering_audio.c               |  94
-rw-r--r--  ffmpeg/doc/examples/filtering_video.c               |  46
-rw-r--r--  ffmpeg/doc/examples/muxing.c                        | 159
-rw-r--r--  ffmpeg/doc/examples/pc-uninstalled/libavcodec.pc    |   6
-rw-r--r--  ffmpeg/doc/examples/pc-uninstalled/libavdevice.pc   |   6
-rw-r--r--  ffmpeg/doc/examples/pc-uninstalled/libavfilter.pc   |   6
-rw-r--r--  ffmpeg/doc/examples/pc-uninstalled/libavformat.pc   |   6
-rw-r--r--  ffmpeg/doc/examples/pc-uninstalled/libavutil.pc     |   4
-rw-r--r--  ffmpeg/doc/examples/pc-uninstalled/libpostproc.pc   |   6
-rw-r--r--  ffmpeg/doc/examples/pc-uninstalled/libswresample.pc |   6
-rw-r--r--  ffmpeg/doc/examples/pc-uninstalled/libswscale.pc    |   6
-rw-r--r--  ffmpeg/doc/examples/resampling_audio.c              |  26
16 files changed, 267 insertions, 494 deletions
diff --git a/ffmpeg/doc/examples/Makefile b/ffmpeg/doc/examples/Makefile
index c849daa..f085532 100644
--- a/ffmpeg/doc/examples/Makefile
+++ b/ffmpeg/doc/examples/Makefile
@@ -7,24 +7,26 @@ FFMPEG_LIBS= libavdevice \
libswscale \
libavutil \
-CFLAGS += -Wall -O2 -g
+CFLAGS += -Wall -g
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
EXAMPLES= decoding_encoding \
- demuxing \
+ demuxing_decoding \
filtering_video \
filtering_audio \
metadata \
muxing \
resampling_audio \
scaling_video \
+ transcode_aac \
OBJS=$(addsuffix .o,$(EXAMPLES))
# the following examples make explicit use of the math library
decoding_encoding: LDLIBS += -lm
muxing: LDLIBS += -lm
+resampling_audio: LDLIBS += -lm
.phony: all clean-test clean
diff --git a/ffmpeg/doc/examples/README b/ffmpeg/doc/examples/README
index a461813..c1ce619 100644
--- a/ffmpeg/doc/examples/README
+++ b/ffmpeg/doc/examples/README
@@ -5,14 +5,19 @@ Both following use cases rely on pkg-config and make, thus make sure
that you have them installed and working on your system.
-1) Build the installed examples in a generic read/write user directory
+Method 1: build the installed examples in a generic read/write user directory
Copy to a read/write user directory and just use "make", it will link
to the libraries on your system, assuming the PKG_CONFIG_PATH is
correctly configured.
-2) Build the examples in-tree
+Method 2: build the examples in-tree
Assuming you are in the source FFmpeg checkout directory, you need to build
-FFmpeg (no need to make install in any prefix). Then you can go into the
-doc/examples and run a command such as PKG_CONFIG_PATH=pc-uninstalled make.
+FFmpeg (no need to make install in any prefix). Then just run "make examples".
+This will build the examples using the FFmpeg build system. You can clean those
+examples using "make examplesclean"
+
+If you want to try the dedicated Makefile examples (to emulate the first
+method), go into doc/examples and run a command such as
+PKG_CONFIG_PATH=pc-uninstalled make.
diff --git a/ffmpeg/doc/examples/decoding_encoding.c b/ffmpeg/doc/examples/decoding_encoding.c
index ae1057c..08e8b92 100644
--- a/ffmpeg/doc/examples/decoding_encoding.c
+++ b/ffmpeg/doc/examples/decoding_encoding.c
@@ -79,7 +79,7 @@ static int select_channel_layout(AVCodec *codec)
{
const uint64_t *p;
uint64_t best_ch_layout = 0;
- int best_nb_channells = 0;
+ int best_nb_channels = 0;
if (!codec->channel_layouts)
return AV_CH_LAYOUT_STEREO;
@@ -88,9 +88,9 @@ static int select_channel_layout(AVCodec *codec)
while (*p) {
int nb_channels = av_get_channel_layout_nb_channels(*p);
- if (nb_channels > best_nb_channells) {
+ if (nb_channels > best_nb_channels) {
best_ch_layout = *p;
- best_nb_channells = nb_channels;
+ best_nb_channels = nb_channels;
}
p++;
}
@@ -156,7 +156,7 @@ static void audio_encode_example(const char *filename)
}
/* frame containing input raw audio */
- frame = avcodec_alloc_frame();
+ frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
@@ -170,6 +170,10 @@ static void audio_encode_example(const char *filename)
* we calculate the size of the samples buffer in bytes */
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
c->sample_fmt, 0);
+ if (buffer_size < 0) {
+ fprintf(stderr, "Could not get sample buffer size\n");
+ exit(1);
+ }
samples = av_malloc(buffer_size);
if (!samples) {
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
@@ -227,7 +231,7 @@ static void audio_encode_example(const char *filename)
fclose(f);
av_freep(&samples);
- avcodec_free_frame(&frame);
+ av_frame_free(&frame);
avcodec_close(c);
av_free(c);
}
@@ -287,12 +291,11 @@ static void audio_decode_example(const char *outfilename, const char *filename)
int got_frame = 0;
if (!decoded_frame) {
- if (!(decoded_frame = avcodec_alloc_frame())) {
+ if (!(decoded_frame = av_frame_alloc())) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
- } else
- avcodec_get_frame_defaults(decoded_frame);
+ }
len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
if (len < 0) {
@@ -329,7 +332,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
avcodec_close(c);
av_free(c);
- avcodec_free_frame(&decoded_frame);
+ av_frame_free(&decoded_frame);
}
/*
@@ -386,7 +389,7 @@ static void video_encode_example(const char *filename, int codec_id)
exit(1);
}
- frame = avcodec_alloc_frame();
+ frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
@@ -467,7 +470,7 @@ static void video_encode_example(const char *filename, int codec_id)
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
- avcodec_free_frame(&frame);
+ av_frame_free(&frame);
printf("\n");
}
@@ -565,7 +568,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
exit(1);
}
- frame = avcodec_alloc_frame();
+ frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
@@ -609,7 +612,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
avcodec_close(c);
av_free(c);
- avcodec_free_frame(&frame);
+ av_frame_free(&frame);
printf("\n");
}
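
Note: the decoding_encoding.c hunks above move the example off the deprecated avcodec_alloc_frame()/avcodec_free_frame() pair onto av_frame_alloc()/av_frame_free(), and add a check on the value returned by av_samples_get_buffer_size(). A minimal standalone sketch of that pattern, with illustrative channel and frame-size values that are not taken from this tree:

/* frame_alloc_sketch.c -- illustrative only */
#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    AVFrame *frame;
    int buffer_size;

    /* new-style allocation, replacing avcodec_alloc_frame() */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        return 1;
    }

    /* the size computation can fail, so check it before allocating samples,
     * as the patched example now does */
    buffer_size = av_samples_get_buffer_size(NULL, 2 /* channels */,
                                             1024 /* frame_size */,
                                             AV_SAMPLE_FMT_S16, 0);
    if (buffer_size < 0) {
        fprintf(stderr, "Could not get sample buffer size\n");
        av_frame_free(&frame);  /* replaces avcodec_free_frame() */
        return 1;
    }
    printf("samples buffer: %d bytes\n", buffer_size);

    av_frame_free(&frame);
    return 0;
}

Such a file can be built the same way as the installed examples, via the pkg-config flags wired up in the Makefile hunk above.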
diff --git a/ffmpeg/doc/examples/demuxing.c b/ffmpeg/doc/examples/demuxing.c
deleted file mode 100644
index 8a1b69b..0000000
--- a/ffmpeg/doc/examples/demuxing.c
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * Copyright (c) 2012 Stefano Sabatini
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/**
- * @file
- * libavformat demuxing API use example.
- *
- * Show how to use the libavformat and libavcodec API to demux and
- * decode audio and video data.
- * @example doc/examples/demuxing.c
- */
-
-#include <libavutil/imgutils.h>
-#include <libavutil/samplefmt.h>
-#include <libavutil/timestamp.h>
-#include <libavformat/avformat.h>
-
-static AVFormatContext *fmt_ctx = NULL;
-static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
-static AVStream *video_stream = NULL, *audio_stream = NULL;
-static const char *src_filename = NULL;
-static const char *video_dst_filename = NULL;
-static const char *audio_dst_filename = NULL;
-static FILE *video_dst_file = NULL;
-static FILE *audio_dst_file = NULL;
-
-static uint8_t *video_dst_data[4] = {NULL};
-static int video_dst_linesize[4];
-static int video_dst_bufsize;
-
-static uint8_t **audio_dst_data = NULL;
-static int audio_dst_linesize;
-static int audio_dst_bufsize;
-
-static int video_stream_idx = -1, audio_stream_idx = -1;
-static AVFrame *frame = NULL;
-static AVPacket pkt;
-static int video_frame_count = 0;
-static int audio_frame_count = 0;
-
-static int decode_packet(int *got_frame, int cached)
-{
- int ret = 0;
-
- if (pkt.stream_index == video_stream_idx) {
- /* decode video frame */
- ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
- if (ret < 0) {
- fprintf(stderr, "Error decoding video frame\n");
- return ret;
- }
-
- if (*got_frame) {
- printf("video_frame%s n:%d coded_n:%d pts:%s\n",
- cached ? "(cached)" : "",
- video_frame_count++, frame->coded_picture_number,
- av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
-
- /* copy decoded frame to destination buffer:
- * this is required since rawvideo expects non aligned data */
- av_image_copy(video_dst_data, video_dst_linesize,
- (const uint8_t **)(frame->data), frame->linesize,
- video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
-
- /* write to rawvideo file */
- fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
- }
- } else if (pkt.stream_index == audio_stream_idx) {
- /* decode audio frame */
- ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
- if (ret < 0) {
- fprintf(stderr, "Error decoding audio frame\n");
- return ret;
- }
-
- if (*got_frame) {
- printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
- cached ? "(cached)" : "",
- audio_frame_count++, frame->nb_samples,
- av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
-
- ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, av_frame_get_channels(frame),
- frame->nb_samples, frame->format, 1);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate audio buffer\n");
- return AVERROR(ENOMEM);
- }
-
- /* TODO: extend return code of the av_samples_* functions so that this call is not needed */
- audio_dst_bufsize =
- av_samples_get_buffer_size(NULL, av_frame_get_channels(frame),
- frame->nb_samples, frame->format, 1);
-
- /* copy audio data to destination buffer:
- * this is required since rawaudio expects non aligned data */
- av_samples_copy(audio_dst_data, frame->data, 0, 0,
- frame->nb_samples, av_frame_get_channels(frame), frame->format);
-
- /* write to rawaudio file */
- fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file);
- av_freep(&audio_dst_data[0]);
- }
- }
-
- return ret;
-}
-
-static int open_codec_context(int *stream_idx,
- AVFormatContext *fmt_ctx, enum AVMediaType type)
-{
- int ret;
- AVStream *st;
- AVCodecContext *dec_ctx = NULL;
- AVCodec *dec = NULL;
-
- ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
- if (ret < 0) {
- fprintf(stderr, "Could not find %s stream in input file '%s'\n",
- av_get_media_type_string(type), src_filename);
- return ret;
- } else {
- *stream_idx = ret;
- st = fmt_ctx->streams[*stream_idx];
-
- /* find decoder for the stream */
- dec_ctx = st->codec;
- dec = avcodec_find_decoder(dec_ctx->codec_id);
- if (!dec) {
- fprintf(stderr, "Failed to find %s codec\n",
- av_get_media_type_string(type));
- return ret;
- }
-
- if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
- fprintf(stderr, "Failed to open %s codec\n",
- av_get_media_type_string(type));
- return ret;
- }
- }
-
- return 0;
-}
-
-static int get_format_from_sample_fmt(const char **fmt,
- enum AVSampleFormat sample_fmt)
-{
- int i;
- struct sample_fmt_entry {
- enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
- } sample_fmt_entries[] = {
- { AV_SAMPLE_FMT_U8, "u8", "u8" },
- { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
- { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
- { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
- { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
- };
- *fmt = NULL;
-
- for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
- struct sample_fmt_entry *entry = &sample_fmt_entries[i];
- if (sample_fmt == entry->sample_fmt) {
- *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
- return 0;
- }
- }
-
- fprintf(stderr,
- "sample format %s is not supported as output format\n",
- av_get_sample_fmt_name(sample_fmt));
- return -1;
-}
-
-int main (int argc, char **argv)
-{
- int ret = 0, got_frame;
-
- if (argc != 4) {
- fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
- "API example program to show how to read frames from an input file.\n"
- "This program reads frames from a file, decodes them, and writes decoded\n"
- "video frames to a rawvideo file named video_output_file, and decoded\n"
- "audio frames to a rawaudio file named audio_output_file.\n"
- "\n", argv[0]);
- exit(1);
- }
- src_filename = argv[1];
- video_dst_filename = argv[2];
- audio_dst_filename = argv[3];
-
- /* register all formats and codecs */
- av_register_all();
-
- /* open input file, and allocate format context */
- if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
- fprintf(stderr, "Could not open source file %s\n", src_filename);
- exit(1);
- }
-
- /* retrieve stream information */
- if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
- fprintf(stderr, "Could not find stream information\n");
- exit(1);
- }
-
- if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
- video_stream = fmt_ctx->streams[video_stream_idx];
- video_dec_ctx = video_stream->codec;
-
- video_dst_file = fopen(video_dst_filename, "wb");
- if (!video_dst_file) {
- fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
- ret = 1;
- goto end;
- }
-
- /* allocate image where the decoded image will be put */
- ret = av_image_alloc(video_dst_data, video_dst_linesize,
- video_dec_ctx->width, video_dec_ctx->height,
- video_dec_ctx->pix_fmt, 1);
- if (ret < 0) {
- fprintf(stderr, "Could not allocate raw video buffer\n");
- goto end;
- }
- video_dst_bufsize = ret;
- }
-
- if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
- int nb_planes;
-
- audio_stream = fmt_ctx->streams[audio_stream_idx];
- audio_dec_ctx = audio_stream->codec;
- audio_dst_file = fopen(audio_dst_filename, "wb");
- if (!audio_dst_file) {
- fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
- ret = 1;
- goto end;
- }
-
- nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
- audio_dec_ctx->channels : 1;
- audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
- if (!audio_dst_data) {
- fprintf(stderr, "Could not allocate audio data buffers\n");
- ret = AVERROR(ENOMEM);
- goto end;
- }
- }
-
- /* dump input information to stderr */
- av_dump_format(fmt_ctx, 0, src_filename, 0);
-
- if (!audio_stream && !video_stream) {
- fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
- ret = 1;
- goto end;
- }
-
- frame = avcodec_alloc_frame();
- if (!frame) {
- fprintf(stderr, "Could not allocate frame\n");
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- /* initialize packet, set data to NULL, let the demuxer fill it */
- av_init_packet(&pkt);
- pkt.data = NULL;
- pkt.size = 0;
-
- if (video_stream)
- printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
- if (audio_stream)
- printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
-
- /* read frames from the file */
- while (av_read_frame(fmt_ctx, &pkt) >= 0) {
- decode_packet(&got_frame, 0);
- av_free_packet(&pkt);
- }
-
- /* flush cached frames */
- pkt.data = NULL;
- pkt.size = 0;
- do {
- decode_packet(&got_frame, 1);
- } while (got_frame);
-
- printf("Demuxing succeeded.\n");
-
- if (video_stream) {
- printf("Play the output video file with the command:\n"
- "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
- av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
- video_dst_filename);
- }
-
- if (audio_stream) {
- const char *fmt;
-
- if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
- goto end;
- printf("Play the output audio file with the command:\n"
- "ffplay -f %s -ac %d -ar %d %s\n",
- fmt, audio_dec_ctx->channels, audio_dec_ctx->sample_rate,
- audio_dst_filename);
- }
-
-end:
- if (video_dec_ctx)
- avcodec_close(video_dec_ctx);
- if (audio_dec_ctx)
- avcodec_close(audio_dec_ctx);
- avformat_close_input(&fmt_ctx);
- if (video_dst_file)
- fclose(video_dst_file);
- if (audio_dst_file)
- fclose(audio_dst_file);
- av_free(frame);
- av_free(video_dst_data[0]);
- av_free(audio_dst_data);
-
- return ret < 0;
-}
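
Note: demuxing.c is removed in this commit; the Makefile hunk above lists the example under the new name demuxing_decoding instead. The stream-selection idiom the deleted file used (av_find_best_stream() followed by avcodec_open2()) still applies; a trimmed sketch of it, updated to av_frame_alloc() in line with the rest of the commit and deliberately minimal in its error handling:

/* open_codec_sketch.c -- illustrative only */
#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/frame.h>

int main(int argc, char **argv)
{
    AVFormatContext *fmt_ctx = NULL;
    AVCodecContext *dec_ctx;
    AVCodec *dec;
    AVFrame *frame;
    int stream_idx, ret;

    if (argc != 2) {
        fprintf(stderr, "usage: %s input_file\n", argv[0]);
        return 1;
    }

    av_register_all();

    if (avformat_open_input(&fmt_ctx, argv[1], NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", argv[1]);
        return 1;
    }
    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        goto end;
    }

    /* pick the "best" video stream, as open_codec_context() did */
    if ((ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0)) < 0) {
        fprintf(stderr, "Could not find a video stream in the input file\n");
        goto end;
    }
    stream_idx = ret;

    dec_ctx = fmt_ctx->streams[stream_idx]->codec;
    dec = avcodec_find_decoder(dec_ctx->codec_id);
    if (!dec) {
        fprintf(stderr, "Failed to find a decoder for the video stream\n");
        ret = AVERROR(EINVAL);
        goto end;
    }
    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        fprintf(stderr, "Failed to open the video decoder\n");
        goto end;
    }

    frame = av_frame_alloc();   /* new API, replacing avcodec_alloc_frame() */
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
    } else {
        printf("decoder %s opened for stream %d\n", dec->name, stream_idx);
        av_frame_free(&frame);
        ret = 0;
    }
    avcodec_close(dec_ctx);

end:
    avformat_close_input(&fmt_ctx);
    return ret < 0;
}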
diff --git a/ffmpeg/doc/examples/filtering_audio.c b/ffmpeg/doc/examples/filtering_audio.c
index 456a1c9..1d66ca3 100644
--- a/ffmpeg/doc/examples/filtering_audio.c
+++ b/ffmpeg/doc/examples/filtering_audio.c
@@ -36,9 +36,10 @@
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
-const char *filter_descr = "aresample=8000,aconvert=s16:mono";
-const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
+static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
+static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
@@ -70,6 +71,7 @@ static int open_input_file(const char *filename)
}
audio_stream_index = ret;
dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
+ av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
/* init the audio decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
@@ -83,17 +85,22 @@ static int open_input_file(const char *filename)
static int init_filters(const char *filters_descr)
{
char args[512];
- int ret;
+ int ret = 0;
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
- const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
- AVABufferSinkParams *abuffersink_params;
+ static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
+ static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
+ static const int out_sample_rates[] = { 8000, -1 };
const AVFilterLink *outlink;
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
filter_graph = avfilter_graph_alloc();
+ if (!outputs || !inputs || !filter_graph) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
if (!dec_ctx->channel_layout)
@@ -106,18 +113,36 @@ static int init_filters(const char *filters_descr)
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
- return ret;
+ goto end;
}
/* buffer audio sink: to terminate the filter chain. */
- abuffersink_params = av_abuffersink_params_alloc();
- abuffersink_params->sample_fmts = sample_fmts;
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
- NULL, abuffersink_params, filter_graph);
- av_free(abuffersink_params);
+ NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
- return ret;
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
+ goto end;
}
/* Endpoints for the filter graph. */
@@ -131,12 +156,12 @@ static int init_filters(const char *filters_descr)
inputs->pad_idx = 0;
inputs->next = NULL;
- if ((ret = avfilter_graph_parse(filter_graph, filters_descr,
- &inputs, &outputs, NULL)) < 0)
- return ret;
+ if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
+ &inputs, &outputs, NULL)) < 0)
+ goto end;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
- return ret;
+ goto end;
/* Print summary of the sink buffer
* Note: args buffer is reused to store channel layout string */
@@ -147,7 +172,11 @@ static int init_filters(const char *filters_descr)
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
args);
- return 0;
+end:
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+
+ return ret;
}
static void print_frame(const AVFrame *frame)
@@ -167,7 +196,7 @@ static void print_frame(const AVFrame *frame)
int main(int argc, char **argv)
{
int ret;
- AVPacket packet;
+ AVPacket packet0, packet;
AVFrame *frame = av_frame_alloc();
AVFrame *filt_frame = av_frame_alloc();
int got_frame;
@@ -191,9 +220,14 @@ int main(int argc, char **argv)
goto end;
/* read all packets */
+ packet0.data = NULL;
+ packet.data = NULL;
while (1) {
- if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
- break;
+ if (!packet0.data) {
+ if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
+ break;
+ packet0 = packet;
+ }
if (packet.stream_index == audio_stream_index) {
avcodec_get_frame_defaults(frame);
@@ -203,10 +237,12 @@ int main(int argc, char **argv)
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
continue;
}
+ packet.size -= ret;
+ packet.data += ret;
if (got_frame) {
/* push the audio data from decoded frame into the filtergraph */
- if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
+ if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
break;
}
@@ -214,29 +250,31 @@ int main(int argc, char **argv)
/* pull filtered audio from the filtergraph */
while (1) {
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
- if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
- if(ret < 0)
+ if (ret < 0)
goto end;
print_frame(filt_frame);
av_frame_unref(filt_frame);
}
}
+
+ if (packet.size <= 0)
+ av_free_packet(&packet0);
+ } else {
+ /* discard non-wanted packets */
+ av_free_packet(&packet0);
}
- av_free_packet(&packet);
}
end:
avfilter_graph_free(&filter_graph);
- if (dec_ctx)
- avcodec_close(dec_ctx);
+ avcodec_close(dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_frame_free(&filt_frame);
if (ret < 0 && ret != AVERROR_EOF) {
- char buf[1024];
- av_strerror(ret, buf, sizeof(buf));
- fprintf(stderr, "Error occurred: %s\n", buf);
+ fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
exit(1);
}
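
Note: in filtering_audio.c the AVABufferSinkParams path is gone; the sink's accepted sample formats, channel layouts and sample rates are now pushed through the AVOptions API with av_opt_set_int_list(), each as a -1 terminated list. A small sketch of just that configuration step on a freshly created abuffersink (graph left unconnected, values matching the example's 8 kHz mono s16 output):

/* abuffersink_opts_sketch.c -- illustrative only */
#include <stdio.h>
#include <libavfilter/avfilter.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    AVFilterGraph *graph;
    AVFilterContext *sink_ctx = NULL;
    static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
    static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
    static const int out_sample_rates[] = { 8000, -1 };
    int ret;

    avfilter_register_all();
    graph = avfilter_graph_alloc();
    if (!graph)
        return 1;

    ret = avfilter_graph_create_filter(&sink_ctx, avfilter_get_by_name("abuffersink"),
                                       "out", NULL, NULL, graph);
    if (ret < 0)
        goto end;

    /* each constraint is a -1 terminated list handed to the sink through
     * AVOptions, replacing the removed AVABufferSinkParams structure */
    if ((ret = av_opt_set_int_list(sink_ctx, "sample_fmts", out_sample_fmts, -1,
                                   AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;
    if ((ret = av_opt_set_int_list(sink_ctx, "channel_layouts", out_channel_layouts, -1,
                                   AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;
    ret = av_opt_set_int_list(sink_ctx, "sample_rates", out_sample_rates, -1,
                              AV_OPT_SEARCH_CHILDREN);

end:
    if (ret < 0)
        fprintf(stderr, "Error configuring the audio buffer sink\n");
    avfilter_graph_free(&graph);
    return ret < 0;
}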
diff --git a/ffmpeg/doc/examples/filtering_video.c b/ffmpeg/doc/examples/filtering_video.c
index daa3966..790c641 100644
--- a/ffmpeg/doc/examples/filtering_video.c
+++ b/ffmpeg/doc/examples/filtering_video.c
@@ -36,6 +36,7 @@
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
const char *filter_descr = "scale=78:24";
@@ -70,6 +71,7 @@ static int open_input_file(const char *filename)
}
video_stream_index = ret;
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
+ av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
/* init the video decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
@@ -83,15 +85,18 @@ static int open_input_file(const char *filename)
static int init_filters(const char *filters_descr)
{
char args[512];
- int ret;
+ int ret = 0;
AVFilter *buffersrc = avfilter_get_by_name("buffer");
AVFilter *buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
- AVBufferSinkParams *buffersink_params;
filter_graph = avfilter_graph_alloc();
+ if (!outputs || !inputs || !filter_graph) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
/* buffer video source: the decoded frames from the decoder will be inserted here. */
snprintf(args, sizeof(args),
@@ -104,18 +109,22 @@ static int init_filters(const char *filters_descr)
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
- return ret;
+ goto end;
}
/* buffer video sink: to terminate the filter chain. */
- buffersink_params = av_buffersink_params_alloc();
- buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
- NULL, buffersink_params, filter_graph);
- av_free(buffersink_params);
+ NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
- return ret;
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
+ AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
+ goto end;
}
/* Endpoints for the filter graph. */
@@ -129,13 +138,18 @@ static int init_filters(const char *filters_descr)
inputs->pad_idx = 0;
inputs->next = NULL;
- if ((ret = avfilter_graph_parse(filter_graph, filters_descr,
+ if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
&inputs, &outputs, NULL)) < 0)
- return ret;
+ goto end;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
- return ret;
- return 0;
+ goto end;
+
+end:
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+
+ return ret;
}
static void display_frame(const AVFrame *frame, AVRational time_base)
@@ -228,22 +242,20 @@ int main(int argc, char **argv)
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
av_frame_unref(filt_frame);
}
+ av_frame_unref(frame);
}
}
av_free_packet(&packet);
}
end:
avfilter_graph_free(&filter_graph);
- if (dec_ctx)
- avcodec_close(dec_ctx);
+ avcodec_close(dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_frame_free(&filt_frame);
if (ret < 0 && ret != AVERROR_EOF) {
- char buf[1024];
- av_strerror(ret, buf, sizeof(buf));
- fprintf(stderr, "Error occurred: %s\n", buf);
+ fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
exit(1);
}
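
Note: filtering_video.c gets the same AVOptions treatment plus a switch from avfilter_graph_parse() to avfilter_graph_parse_ptr(), with the AVFilterInOut lists now freed unconditionally at the end instead of being leaked on early returns. A sketch of that ownership pattern around a tiny buffer -> scale=78:24 -> buffersink graph; the fixed input properties are stand-ins for the values the example reads from its decoder context:

/* graph_parse_ptr_sketch.c -- illustrative only */
#include <libavfilter/avfilter.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

int main(void)
{
    AVFilterGraph *graph = NULL;
    AVFilterContext *src_ctx = NULL, *sink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int ret;

    avfilter_register_all();
    graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* fixed stand-in properties; pix_fmt=0 is AV_PIX_FMT_YUV420P */
    ret = avfilter_graph_create_filter(&src_ctx, avfilter_get_by_name("buffer"), "in",
                                       "video_size=320x240:pix_fmt=0:time_base=1/25:pixel_aspect=1/1",
                                       NULL, graph);
    if (ret < 0)
        goto end;
    ret = avfilter_graph_create_filter(&sink_ctx, avfilter_get_by_name("buffersink"), "out",
                                       NULL, NULL, graph);
    if (ret < 0)
        goto end;

    outputs->name       = av_strdup("in");
    outputs->filter_ctx = src_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;
    inputs->name        = av_strdup("out");
    inputs->filter_ctx  = sink_ctx;
    inputs->pad_idx     = 0;
    inputs->next        = NULL;

    /* the _ptr variant updates the inout lists in place, so they are freed
     * unconditionally below rather than returned early on error */
    if ((ret = avfilter_graph_parse_ptr(graph, "scale=78:24",
                                        &inputs, &outputs, NULL)) < 0)
        goto end;
    ret = avfilter_graph_config(graph, NULL);

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    avfilter_graph_free(&graph);
    return ret < 0;
}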
diff --git a/ffmpeg/doc/examples/muxing.c b/ffmpeg/doc/examples/muxing.c
index 7305cc6..4cd3f65 100644
--- a/ffmpeg/doc/examples/muxing.c
+++ b/ffmpeg/doc/examples/muxing.c
@@ -34,9 +34,11 @@
#include <string.h>
#include <math.h>
+#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
+#include <libswresample/swresample.h>
/* 5 seconds stream duration */
#define STREAM_DURATION 200.0
@@ -46,13 +48,6 @@
static int sws_flags = SWS_BICUBIC;
-/**************************************************************/
-/* audio output */
-
-static float t, tincr, tincr2;
-static int16_t *samples;
-static int audio_input_frame_size;
-
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
@@ -78,8 +73,7 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
- st->id = 1;
- c->sample_fmt = AV_SAMPLE_FMT_S16;
+ c->sample_fmt = AV_SAMPLE_FMT_FLTP;
c->bit_rate = 64000;
c->sample_rate = 44100;
c->channels = 2;
@@ -127,8 +121,17 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
/* audio output */
static float t, tincr, tincr2;
-static int16_t *samples;
-static int audio_input_frame_size;
+
+static uint8_t **src_samples_data;
+static int src_samples_linesize;
+static int src_nb_samples;
+
+static int max_dst_nb_samples;
+uint8_t **dst_samples_data;
+int dst_samples_linesize;
+int dst_samples_size;
+
+struct SwrContext *swr_ctx = NULL;
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
@@ -150,17 +153,54 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
/* increment frequency by 110 Hz per second */
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
- if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
- audio_input_frame_size = 10000;
- else
- audio_input_frame_size = c->frame_size;
- samples = av_malloc(audio_input_frame_size *
- av_get_bytes_per_sample(c->sample_fmt) *
- c->channels);
- if (!samples) {
- fprintf(stderr, "Could not allocate audio samples buffer\n");
+ src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
+ 10000 : c->frame_size;
+
+ ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
+ src_nb_samples, AV_SAMPLE_FMT_S16, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Could not allocate source samples\n");
exit(1);
}
+
+ /* compute the number of converted samples: buffering is avoided
+ * ensuring that the output buffer will contain at least all the
+ * converted input samples */
+ max_dst_nb_samples = src_nb_samples;
+
+ /* create resampler context */
+ if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
+ swr_ctx = swr_alloc();
+ if (!swr_ctx) {
+ fprintf(stderr, "Could not allocate resampler context\n");
+ exit(1);
+ }
+
+ /* set options */
+ av_opt_set_int (swr_ctx, "in_channel_count", c->channels, 0);
+ av_opt_set_int (swr_ctx, "in_sample_rate", c->sample_rate, 0);
+ av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
+ av_opt_set_int (swr_ctx, "out_channel_count", c->channels, 0);
+ av_opt_set_int (swr_ctx, "out_sample_rate", c->sample_rate, 0);
+ av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
+
+ /* initialize the resampling context */
+ if ((ret = swr_init(swr_ctx)) < 0) {
+ fprintf(stderr, "Failed to initialize the resampling context\n");
+ exit(1);
+ }
+
+ ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
+ max_dst_nb_samples, c->sample_fmt, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Could not allocate destination samples\n");
+ exit(1);
+ }
+ } else {
+ dst_samples_data = src_samples_data;
+ }
+ dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
+ c->sample_fmt, 0);
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
@@ -184,19 +224,45 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
- AVFrame *frame = avcodec_alloc_frame();
- int got_packet, ret;
+ AVFrame *frame = av_frame_alloc();
+ int got_packet, ret, dst_nb_samples;
av_init_packet(&pkt);
c = st->codec;
- get_audio_frame(samples, audio_input_frame_size, c->channels);
- frame->nb_samples = audio_input_frame_size;
+ get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);
+
+ /* convert samples from native format to destination codec format, using the resampler */
+ if (swr_ctx) {
+ /* compute destination number of samples */
+ dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
+ c->sample_rate, c->sample_rate, AV_ROUND_UP);
+ if (dst_nb_samples > max_dst_nb_samples) {
+ av_free(dst_samples_data[0]);
+ ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
+ dst_nb_samples, c->sample_fmt, 0);
+ if (ret < 0)
+ exit(1);
+ max_dst_nb_samples = dst_nb_samples;
+ dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
+ c->sample_fmt, 0);
+ }
+
+ /* convert to destination format */
+ ret = swr_convert(swr_ctx,
+ dst_samples_data, dst_nb_samples,
+ (const uint8_t **)src_samples_data, src_nb_samples);
+ if (ret < 0) {
+ fprintf(stderr, "Error while converting\n");
+ exit(1);
+ }
+ } else {
+ dst_nb_samples = src_nb_samples;
+ }
+
+ frame->nb_samples = dst_nb_samples;
avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- (uint8_t *)samples,
- audio_input_frame_size *
- av_get_bytes_per_sample(c->sample_fmt) *
- c->channels, 1);
+ dst_samples_data[0], dst_samples_size, 0);
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (ret < 0) {
@@ -205,7 +271,7 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
}
if (!got_packet)
- return;
+ goto freeframe;
pkt.stream_index = st->index;
@@ -216,14 +282,19 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
av_err2str(ret));
exit(1);
}
- avcodec_free_frame(&frame);
+freeframe:
+ av_frame_free(&frame);
}
static void close_audio(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
-
- av_free(samples);
+ if (dst_samples_data != src_samples_data) {
+ av_free(dst_samples_data[0]);
+ av_free(dst_samples_data);
+ }
+ av_free(src_samples_data[0]);
+ av_free(src_samples_data);
}
/**************************************************************/
@@ -246,7 +317,7 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
}
/* allocate and init a re-usable frame */
- frame = avcodec_alloc_frame();
+ frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
@@ -388,7 +459,7 @@ int main(int argc, char **argv)
AVFormatContext *oc;
AVStream *audio_st, *video_st;
AVCodec *audio_codec, *video_codec;
- double audio_pts, video_pts;
+ double audio_time, video_time;
int ret;
/* Initialize libavcodec, and register all codecs and formats. */
@@ -461,23 +532,15 @@ int main(int argc, char **argv)
frame->pts = 0;
for (;;) {
/* Compute current audio and video time. */
- if (audio_st)
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- else
- audio_pts = 0.0;
-
- if (video_st)
- video_pts = (double)video_st->pts.val * video_st->time_base.num /
- video_st->time_base.den;
- else
- video_pts = 0.0;
-
- if ((!audio_st || audio_pts >= STREAM_DURATION) &&
- (!video_st || video_pts >= STREAM_DURATION))
+ audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0;
+ video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0;
+
+ if ((!audio_st || audio_time >= STREAM_DURATION) &&
+ (!video_st || video_time >= STREAM_DURATION))
break;
/* write interleaved audio and video frames */
- if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
+ if (!video_st || (video_st && audio_st && audio_time < video_time)) {
write_audio_frame(oc, audio_st);
} else {
write_video_frame(oc, video_st);
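
Note: the audio path in muxing.c now generates signed 16-bit samples and converts them to the encoder's sample format (AV_SAMPLE_FMT_FLTP) through libswresample, sizing the output with swr_get_delay() and av_rescale_rnd(). A standalone sketch of that converter setup, with the rate and channel count hard-coded for illustration where the patch reads them from the codec context:

/* swr_setup_sketch.c -- illustrative only */
#include <stdio.h>
#include <libavutil/mathematics.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>

int main(void)
{
    struct SwrContext *swr_ctx;
    int sample_rate = 44100, channels = 2, src_nb_samples = 1024;
    int dst_nb_samples, ret;

    swr_ctx = swr_alloc();
    if (!swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        return 1;
    }

    /* same option names the patch sets: interleaved S16 in, planar float out */
    av_opt_set_int       (swr_ctx, "in_channel_count",  channels,           0);
    av_opt_set_int       (swr_ctx, "in_sample_rate",    sample_rate,        0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16,  0);
    av_opt_set_int       (swr_ctx, "out_channel_count", channels,           0);
    av_opt_set_int       (swr_ctx, "out_sample_rate",   sample_rate,        0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt",    AV_SAMPLE_FMT_FLTP, 0);

    if ((ret = swr_init(swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        swr_free(&swr_ctx);
        return 1;
    }

    /* upper bound on converted samples, recomputed before each swr_convert() */
    dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, sample_rate) + src_nb_samples,
                                    sample_rate, sample_rate, AV_ROUND_UP);
    printf("at most %d output samples per %d input samples\n",
           dst_nb_samples, src_nb_samples);

    swr_free(&swr_ctx);
    return 0;
}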
diff --git a/ffmpeg/doc/examples/pc-uninstalled/libavcodec.pc b/ffmpeg/doc/examples/pc-uninstalled/libavcodec.pc
index 787d687..a87ded7 100644
--- a/ffmpeg/doc/examples/pc-uninstalled/libavcodec.pc
+++ b/ffmpeg/doc/examples/pc-uninstalled/libavcodec.pc
@@ -5,8 +5,8 @@ includedir=${pcfiledir}/../../..
Name: libavcodec
Description: FFmpeg codec library
-Version: 55.1.100
-Requires: libavutil = 52.22.100
+Version: 55.46.100
+Requires: libavutil = 52.59.100
Conflicts:
-Libs: -L${libdir} -lavcodec
+Libs: -L${libdir} -Wl,-rpath,${libdir} -lavcodec
Cflags: -I${includedir}
diff --git a/ffmpeg/doc/examples/pc-uninstalled/libavdevice.pc b/ffmpeg/doc/examples/pc-uninstalled/libavdevice.pc
index 89ef046..7f05a29 100644
--- a/ffmpeg/doc/examples/pc-uninstalled/libavdevice.pc
+++ b/ffmpeg/doc/examples/pc-uninstalled/libavdevice.pc
@@ -5,8 +5,8 @@ includedir=${pcfiledir}/../../..
Name: libavdevice
Description: FFmpeg device handling library
-Version: 55.0.100
-Requires: libavfilter = 3.48.100, libavformat = 55.0.100
+Version: 55.5.102
+Requires: libavfilter = 4.0.103, libavformat = 55.22.100
Conflicts:
-Libs: -L${libdir} -lavdevice
+Libs: -L${libdir} -Wl,-rpath,${libdir} -lavdevice
Cflags: -I${includedir}
diff --git a/ffmpeg/doc/examples/pc-uninstalled/libavfilter.pc b/ffmpeg/doc/examples/pc-uninstalled/libavfilter.pc
index aacaf0a..b42f95d 100644
--- a/ffmpeg/doc/examples/pc-uninstalled/libavfilter.pc
+++ b/ffmpeg/doc/examples/pc-uninstalled/libavfilter.pc
@@ -5,8 +5,8 @@ includedir=${pcfiledir}/../../..
Name: libavfilter
Description: FFmpeg audio/video filtering library
-Version: 3.48.100
-Requires: libpostproc = 52.2.100, libswresample = 0.17.102, libswscale = 2.2.100, libavformat = 55.0.100, libavcodec = 55.1.100, libavutil = 52.22.100
+Version: 4.0.103
+Requires: libpostproc = 52.3.100, libswresample = 0.17.104, libswscale = 2.5.101, libavformat = 55.22.100, libavcodec = 55.46.100, libavutil = 52.59.100
Conflicts:
-Libs: -L${libdir} -lavfilter
+Libs: -L${libdir} -Wl,-rpath,${libdir} -lavfilter
Cflags: -I${includedir}
diff --git a/ffmpeg/doc/examples/pc-uninstalled/libavformat.pc b/ffmpeg/doc/examples/pc-uninstalled/libavformat.pc
index 8f27151..8bab324 100644
--- a/ffmpeg/doc/examples/pc-uninstalled/libavformat.pc
+++ b/ffmpeg/doc/examples/pc-uninstalled/libavformat.pc
@@ -5,8 +5,8 @@ includedir=${pcfiledir}/../../..
Name: libavformat
Description: FFmpeg container format library
-Version: 55.0.100
-Requires: libavcodec = 55.1.100
+Version: 55.22.100
+Requires: libavcodec = 55.46.100
Conflicts:
-Libs: -L${libdir} -lavformat
+Libs: -L${libdir} -Wl,-rpath,${libdir} -lavformat
Cflags: -I${includedir}
diff --git a/ffmpeg/doc/examples/pc-uninstalled/libavutil.pc b/ffmpeg/doc/examples/pc-uninstalled/libavutil.pc
index 8a95064..85df0f0 100644
--- a/ffmpeg/doc/examples/pc-uninstalled/libavutil.pc
+++ b/ffmpeg/doc/examples/pc-uninstalled/libavutil.pc
@@ -5,8 +5,8 @@ includedir=${pcfiledir}/../../..
Name: libavutil
Description: FFmpeg utility library
-Version: 52.22.100
+Version: 52.59.100
Requires:
Conflicts:
-Libs: -L${libdir} -lavutil
+Libs: -L${libdir} -Wl,-rpath,${libdir} -lavutil
Cflags: -I${includedir}
diff --git a/ffmpeg/doc/examples/pc-uninstalled/libpostproc.pc b/ffmpeg/doc/examples/pc-uninstalled/libpostproc.pc
index 5e87c13..94da503 100644
--- a/ffmpeg/doc/examples/pc-uninstalled/libpostproc.pc
+++ b/ffmpeg/doc/examples/pc-uninstalled/libpostproc.pc
@@ -5,8 +5,8 @@ includedir=${pcfiledir}/../../..
Name: libpostproc
Description: FFmpeg postprocessing library
-Version: 52.2.100
-Requires: libavutil = 52.22.100
+Version: 52.3.100
+Requires: libavutil = 52.59.100
Conflicts:
-Libs: -L${libdir} -lpostproc
+Libs: -L${libdir} -Wl,-rpath,${libdir} -lpostproc
Cflags: -I${includedir}
diff --git a/ffmpeg/doc/examples/pc-uninstalled/libswresample.pc b/ffmpeg/doc/examples/pc-uninstalled/libswresample.pc
index 873f39d..45bfa4a 100644
--- a/ffmpeg/doc/examples/pc-uninstalled/libswresample.pc
+++ b/ffmpeg/doc/examples/pc-uninstalled/libswresample.pc
@@ -5,8 +5,8 @@ includedir=${pcfiledir}/../../..
Name: libswresample
Description: FFmpeg audio resampling library
-Version: 0.17.102
-Requires: libavutil = 52.22.100
+Version: 0.17.104
+Requires: libavutil = 52.59.100
Conflicts:
-Libs: -L${libdir} -lswresample
+Libs: -L${libdir} -Wl,-rpath,${libdir} -lswresample
Cflags: -I${includedir}
diff --git a/ffmpeg/doc/examples/pc-uninstalled/libswscale.pc b/ffmpeg/doc/examples/pc-uninstalled/libswscale.pc
index 764a10c..8693580 100644
--- a/ffmpeg/doc/examples/pc-uninstalled/libswscale.pc
+++ b/ffmpeg/doc/examples/pc-uninstalled/libswscale.pc
@@ -5,8 +5,8 @@ includedir=${pcfiledir}/../../..
Name: libswscale
Description: FFmpeg image rescaling library
-Version: 2.2.100
-Requires: libavutil = 52.22.100
+Version: 2.5.101
+Requires: libavutil = 52.59.100
Conflicts:
-Libs: -L${libdir} -lswscale
+Libs: -L${libdir} -Wl,-rpath,${libdir} -lswscale
Cflags: -I${includedir}
diff --git a/ffmpeg/doc/examples/resampling_audio.c b/ffmpeg/doc/examples/resampling_audio.c
index dd128e8..a15e042 100644
--- a/ffmpeg/doc/examples/resampling_audio.c
+++ b/ffmpeg/doc/examples/resampling_audio.c
@@ -62,7 +62,7 @@ static int get_format_from_sample_fmt(const char **fmt,
/**
* Fill dst buffer with nb_samples, generated starting from t.
*/
-void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
+static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
{
int i, j;
double tincr = 1.0 / sample_rate, *dstp = dst;
@@ -78,18 +78,6 @@ void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate,
}
}
-int alloc_samples_array_and_data(uint8_t ***data, int *linesize, int nb_channels,
- int nb_samples, enum AVSampleFormat sample_fmt, int align)
-{
- int nb_planes = av_sample_fmt_is_planar(sample_fmt) ? nb_channels : 1;
-
- *data = av_malloc(sizeof(*data) * nb_planes);
- if (!*data)
- return AVERROR(ENOMEM);
- return av_samples_alloc(*data, linesize, nb_channels,
- nb_samples, sample_fmt, align);
-}
-
int main(int argc, char **argv)
{
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
@@ -149,8 +137,8 @@ int main(int argc, char **argv)
/* allocate source and destination samples buffers */
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
- ret = alloc_samples_array_and_data(&src_data, &src_linesize, src_nb_channels,
- src_nb_samples, src_sample_fmt, 0);
+ ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
+ src_nb_samples, src_sample_fmt, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate source samples\n");
goto end;
@@ -164,8 +152,8 @@ int main(int argc, char **argv)
/* buffer is going to be directly written to a rawaudio file, no alignment */
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
- ret = alloc_samples_array_and_data(&dst_data, &dst_linesize, dst_nb_channels,
- dst_nb_samples, dst_sample_fmt, 0);
+ ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
+ dst_nb_samples, dst_sample_fmt, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate destination samples\n");
goto end;
@@ -196,6 +184,10 @@ int main(int argc, char **argv)
}
dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
ret, dst_sample_fmt, 1);
+ if (dst_bufsize < 0) {
+ fprintf(stderr, "Could not get sample buffer size\n");
+ goto end;
+ }
printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
} while (t < 10);
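
Note: resampling_audio.c drops its local alloc_samples_array_and_data() helper in favour of the library's av_samples_alloc_array_and_samples(), which allocates the plane-pointer array and the sample buffer in one call; note the matching two-step free. A minimal sketch with illustrative values:

/* samples_alloc_sketch.c -- illustrative only */
#include <stdio.h>
#include <stdint.h>
#include <libavutil/mem.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    uint8_t **data = NULL;
    int linesize, ret;

    /* one call replaces the removed helper: it allocates the uint8_t* plane
     * array and the underlying sample buffer, filling data[] and linesize */
    ret = av_samples_alloc_array_and_samples(&data, &linesize, 3 /* channels */,
                                             1024 /* samples */, AV_SAMPLE_FMT_DBL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate samples\n");
        return 1;
    }
    printf("linesize: %d bytes\n", linesize);

    /* free the sample buffer first (data[0]), then the plane array itself */
    av_freep(&data[0]);
    av_freep(&data);
    return 0;
}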