diff options
| author | Tim Redfern <tim@eclectronics.org> | 2013-12-29 12:19:38 +0000 |
|---|---|---|
| committer | Tim Redfern <tim@eclectronics.org> | 2013-12-29 12:19:38 +0000 |
| commit | f7813a5324be39d13ab536c245d15dfc602a7849 (patch) | |
| tree | fad99148b88823d34a5df2f0a25881a002eb291b /ffmpeg/doc/examples/muxing.c | |
| parent | b7a5a477b8ff4d4e3028b9dfb9a9df0a41463f92 (diff) | |
basic type mechanism working
Diffstat (limited to 'ffmpeg/doc/examples/muxing.c')
| -rw-r--r-- | ffmpeg/doc/examples/muxing.c | 159 |
1 file changed, 111 insertions(+), 48 deletions(-)
diff --git a/ffmpeg/doc/examples/muxing.c b/ffmpeg/doc/examples/muxing.c index 7305cc6..4cd3f65 100644 --- a/ffmpeg/doc/examples/muxing.c +++ b/ffmpeg/doc/examples/muxing.c @@ -34,9 +34,11 @@ #include <string.h> #include <math.h> +#include <libavutil/opt.h> #include <libavutil/mathematics.h> #include <libavformat/avformat.h> #include <libswscale/swscale.h> +#include <libswresample/swresample.h> /* 5 seconds stream duration */ #define STREAM_DURATION 200.0 @@ -46,13 +48,6 @@ static int sws_flags = SWS_BICUBIC; -/**************************************************************/ -/* audio output */ - -static float t, tincr, tincr2; -static int16_t *samples; -static int audio_input_frame_size; - /* Add an output stream. */ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id) @@ -78,8 +73,7 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, switch ((*codec)->type) { case AVMEDIA_TYPE_AUDIO: - st->id = 1; - c->sample_fmt = AV_SAMPLE_FMT_S16; + c->sample_fmt = AV_SAMPLE_FMT_FLTP; c->bit_rate = 64000; c->sample_rate = 44100; c->channels = 2; @@ -127,8 +121,17 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, /* audio output */ static float t, tincr, tincr2; -static int16_t *samples; -static int audio_input_frame_size; + +static uint8_t **src_samples_data; +static int src_samples_linesize; +static int src_nb_samples; + +static int max_dst_nb_samples; +uint8_t **dst_samples_data; +int dst_samples_linesize; +int dst_samples_size; + +struct SwrContext *swr_ctx = NULL; static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st) { @@ -150,17 +153,54 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st) /* increment frequency by 110 Hz per second */ tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate; - if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) - audio_input_frame_size = 10000; - else - audio_input_frame_size = c->frame_size; - samples = 
av_malloc(audio_input_frame_size * - av_get_bytes_per_sample(c->sample_fmt) * - c->channels); - if (!samples) { - fprintf(stderr, "Could not allocate audio samples buffer\n"); + src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ? + 10000 : c->frame_size; + + ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels, + src_nb_samples, AV_SAMPLE_FMT_S16, 0); + if (ret < 0) { + fprintf(stderr, "Could not allocate source samples\n"); exit(1); } + + /* compute the number of converted samples: buffering is avoided + * ensuring that the output buffer will contain at least all the + * converted input samples */ + max_dst_nb_samples = src_nb_samples; + + /* create resampler context */ + if (c->sample_fmt != AV_SAMPLE_FMT_S16) { + swr_ctx = swr_alloc(); + if (!swr_ctx) { + fprintf(stderr, "Could not allocate resampler context\n"); + exit(1); + } + + /* set options */ + av_opt_set_int (swr_ctx, "in_channel_count", c->channels, 0); + av_opt_set_int (swr_ctx, "in_sample_rate", c->sample_rate, 0); + av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); + av_opt_set_int (swr_ctx, "out_channel_count", c->channels, 0); + av_opt_set_int (swr_ctx, "out_sample_rate", c->sample_rate, 0); + av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", c->sample_fmt, 0); + + /* initialize the resampling context */ + if ((ret = swr_init(swr_ctx)) < 0) { + fprintf(stderr, "Failed to initialize the resampling context\n"); + exit(1); + } + + ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels, + max_dst_nb_samples, c->sample_fmt, 0); + if (ret < 0) { + fprintf(stderr, "Could not allocate destination samples\n"); + exit(1); + } + } else { + dst_samples_data = src_samples_data; + } + dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples, + c->sample_fmt, 0); } /* Prepare a 16 bit dummy audio frame of 'frame_size' samples and @@ -184,19 +224,45 @@ static void 
write_audio_frame(AVFormatContext *oc, AVStream *st) { AVCodecContext *c; AVPacket pkt = { 0 }; // data and size must be 0; - AVFrame *frame = avcodec_alloc_frame(); - int got_packet, ret; + AVFrame *frame = av_frame_alloc(); + int got_packet, ret, dst_nb_samples; av_init_packet(&pkt); c = st->codec; - get_audio_frame(samples, audio_input_frame_size, c->channels); - frame->nb_samples = audio_input_frame_size; + get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels); + + /* convert samples from native format to destination codec format, using the resampler */ + if (swr_ctx) { + /* compute destination number of samples */ + dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples, + c->sample_rate, c->sample_rate, AV_ROUND_UP); + if (dst_nb_samples > max_dst_nb_samples) { + av_free(dst_samples_data[0]); + ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels, + dst_nb_samples, c->sample_fmt, 0); + if (ret < 0) + exit(1); + max_dst_nb_samples = dst_nb_samples; + dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples, + c->sample_fmt, 0); + } + + /* convert to destination format */ + ret = swr_convert(swr_ctx, + dst_samples_data, dst_nb_samples, + (const uint8_t **)src_samples_data, src_nb_samples); + if (ret < 0) { + fprintf(stderr, "Error while converting\n"); + exit(1); + } + } else { + dst_nb_samples = src_nb_samples; + } + + frame->nb_samples = dst_nb_samples; avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, - (uint8_t *)samples, - audio_input_frame_size * - av_get_bytes_per_sample(c->sample_fmt) * - c->channels, 1); + dst_samples_data[0], dst_samples_size, 0); ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet); if (ret < 0) { @@ -205,7 +271,7 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st) } if (!got_packet) - return; + goto freeframe; pkt.stream_index = st->index; @@ -216,14 +282,19 @@ static void 
write_audio_frame(AVFormatContext *oc, AVStream *st) av_err2str(ret)); exit(1); } - avcodec_free_frame(&frame); +freeframe: + av_frame_free(&frame); } static void close_audio(AVFormatContext *oc, AVStream *st) { avcodec_close(st->codec); - - av_free(samples); + if (dst_samples_data != src_samples_data) { + av_free(dst_samples_data[0]); + av_free(dst_samples_data); + } + av_free(src_samples_data[0]); + av_free(src_samples_data); } /**************************************************************/ @@ -246,7 +317,7 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st) } /* allocate and init a re-usable frame */ - frame = avcodec_alloc_frame(); + frame = av_frame_alloc(); if (!frame) { fprintf(stderr, "Could not allocate video frame\n"); exit(1); @@ -388,7 +459,7 @@ int main(int argc, char **argv) AVFormatContext *oc; AVStream *audio_st, *video_st; AVCodec *audio_codec, *video_codec; - double audio_pts, video_pts; + double audio_time, video_time; int ret; /* Initialize libavcodec, and register all codecs and formats. */ @@ -461,23 +532,15 @@ int main(int argc, char **argv) frame->pts = 0; for (;;) { /* Compute current audio and video time. */ - if (audio_st) - audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den; - else - audio_pts = 0.0; - - if (video_st) - video_pts = (double)video_st->pts.val * video_st->time_base.num / - video_st->time_base.den; - else - video_pts = 0.0; - - if ((!audio_st || audio_pts >= STREAM_DURATION) && - (!video_st || video_pts >= STREAM_DURATION)) + audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0; + video_time = video_st ? 
video_st->pts.val * av_q2d(video_st->time_base) : 0.0; + + if ((!audio_st || audio_time >= STREAM_DURATION) && + (!video_st || video_time >= STREAM_DURATION)) break; /* write interleaved audio and video frames */ - if (!video_st || (video_st && audio_st && audio_pts < video_pts)) { + if (!video_st || (video_st && audio_st && audio_time < video_time)) { write_audio_frame(oc, audio_st); } else { write_video_frame(oc, video_st); |
