| author | Tim Redfern <tim@eclectronics.org> | 2013-05-28 10:34:18 +0100 |
|---|---|---|
| committer | Tim Redfern <tim@eclectronics.org> | 2013-05-28 10:34:18 +0100 |
| commit | f648d137d381d778f1ae25a25a2cc7257128123b (patch) | |
| tree | 79896759a249a3a012aed7c150d171dfa082bca1 | |
| parent | 14f3d350aa7c9abfd8bc394e87177d3f9e6b8d87 (diff) | |
further work on wrapper
| -rwxr-xr-x | rotord/libavwrapper.cpp | 549 |
| -rwxr-xr-x | rotord/libavwrapper.h | 73 |
| -rwxr-xr-x | rotord/rotor.h | 2 |
3 files changed, 611 insertions, 13 deletions
diff --git a/rotord/libavwrapper.cpp b/rotord/libavwrapper.cpp index 1fe054e..4b73c46 100755 --- a/rotord/libavwrapper.cpp +++ b/rotord/libavwrapper.cpp @@ -129,6 +129,8 @@ void AVPacketWrapper::free() } +//bool libav::b_is_one_time_inited = false; + ///////////////////////// // decoder methods // ///////////////////////// @@ -512,17 +514,17 @@ void libav::decoder::initialize() //reply = NULL; //ioBuffer = NULL; //avioContext = NULL; - decoder::maybeInitFFMpegLib(); + maybeInitFFMpegLib(); } -void libav::decoder::maybeInitFFMpegLib() +void libav::maybeInitFFMpegLib() { - if (decoder::b_is_one_time_inited) + if (b_is_one_time_inited) return; av_register_all(); avcodec_register_all(); avformat_network_init(); - decoder::b_is_one_time_inited = true; + b_is_one_time_inited = true; } bool libav::decoder::avtry(int result, const std::string& msg) { @@ -537,7 +539,6 @@ bool libav::decoder::avtry(int result, const std::string& msg) { return true; } -bool libav::decoder::b_is_one_time_inited = false; @@ -559,7 +560,7 @@ libav::encoder::encoder(const char * file_name, int width, int height, float _fr if (0 != (height % 2)) cerr << "WARNING: Video height is not a multiple of 2" << endl; - decoder::maybeInitFFMpegLib(); + maybeInitFFMpegLib(); container = avformat_alloc_context(); if (NULL == container) @@ -705,7 +706,7 @@ libav::encoder::encoder(const char * file_name, int width, int height, float _fr /* open the output file */ if (!(fmt->flags & AVFMT_NOFILE)) - { + { //QMutexLocker lock(&decoder::mutex); if (avio_open(&container->pb, file_name, AVIO_FLAG_WRITE) < 0) throw std::runtime_error("Error opening output video file"); @@ -827,5 +828,539 @@ libav::encoder::~encoder() picture_rgb = NULL; } +bool libav::exporter::setup(int w,int h, int bitRate, int frameRate, std::string container){ + + maybeInitFFMpegLib(); + + this->w=w; + this->h=h; + this->bitRate=bitRate; + this->frameRate=frameRate; + this->container=container; + + return true; +} + +bool libav::exporter::record(std::string filename){ + + // allocate the output media context // + avformat_alloc_output_context2(&oc, NULL, NULL, filename.c_str()); + if (!oc) { + printf("Could not deduce output format from file extension: using MPEG.\n"); + avformat_alloc_output_context2(&oc, NULL, "mpeg", filename.c_str()); + } + if (!oc) { + return false; + } + fmt = oc->oformat; + + // Add the audio and video streams using the default format codecs + // * and initialize the codecs. // + video_st = NULL; + audio_st = NULL; + + if (fmt->video_codec != AV_CODEC_ID_NONE) { + video_st = add_stream(oc, &video_codec, fmt->video_codec); + } + if (fmt->audio_codec != AV_CODEC_ID_NONE) { + audio_st = add_stream(oc, &audio_codec, fmt->audio_codec); + } + + //set initial video params + video_st->codec->width=w; + video_st->codec->height=h; + video_st->codec->time_base.num = 1;//codecCtx->ticks_per_frame; + video_st->codec->time_base.den = frameRate; + video_st->time_base = video_st->codec->time_base; + //audioStream->time_base = codecCtx->time_base; //???has the capability of crashing + + video_st->codec->gop_size = 10; /* emit one intra frame every ten frames */ + video_st->codec->pix_fmt = PIX_FMT_YUV420P; + + // Now that all the parameters are set, we can open the audio and + // * video codecs and allocate the necessary encode buffers. 
// + if (video_st) + open_video(oc, video_codec, video_st); + if (audio_st) { + audioframesize=open_audio(oc, audio_codec, audio_st); + audiostep=((float)audioframesize)/(audio_st->codec->sample_rate); + std::cerr << "opened audio codec with "<<audioframesize<<" frame size and "<<audiostep<<" seconds per frame"<<std::endl; + } + + + av_dump_format(oc, 0, filename.c_str(), 1); + + // open the output file, if needed // + if (!(fmt->flags & AVFMT_NOFILE)) { + int ret = avio_open(&oc->pb, filename.c_str(), AVIO_FLAG_WRITE); + if (ret < 0) { + std::cerr <<"Could not open " << filename.c_str() << std::endl; + return false; + } + } + + // Write the stream header, if any. // + int ret = avformat_write_header(oc, NULL); + if (ret < 0) { + //std::cerr <<"Error occurred when opening output file:" << av_err2str(ret) << std::endl; + return false; + } + + if (frame) + frame->pts = 0; + + outputframe=0; + + return true; +} +bool libav::exporter::encodeFrame(unsigned char *pixels,uint16_t *samples){ + // Compute current audio and video time. // + if (audio_st) + audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den; + else + audio_pts = 0.0; + + if (video_st) + video_pts = (double)video_st->pts.val * video_st->time_base.num / + video_st->time_base.den; + else + video_pts = 0.0; + + // write interleaved audio and video frames // + if (!video_st || (video_st && audio_st && audio_pts < video_pts)) { + write_audio_frame(oc, audio_st, samples); + } else { + write_video_frame(oc, video_st, pixels); + + frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base); + } + + //std::cerr << "encoded frame " << outputframe << std::endl; + outputframe++; + + return true; +} +bool libav::exporter::encodeFrame(unsigned char *pixels,AVPacket *audio){ + // Compute current audio and video time. // + if (audio_st) + audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den; + else + audio_pts = 0.0; + + if (video_st) + video_pts = (double)video_st->pts.val * video_st->time_base.num / + video_st->time_base.den; + else + video_pts = 0.0; + + // write interleaved audio and video frames // + if (!video_st || (video_st && audio_st && audio_pts < video_pts)) { + write_audio_frame(oc, audio_st, audio); + } else { + write_video_frame(oc, video_st, pixels); + + frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base); + } + + //std::cerr << "encoded frame " << outputframe << std::endl; + outputframe++; + + return true; +} +bool libav::exporter::encodeFrame(unsigned char *pixels){ + video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den; + write_video_frame(oc, video_st, pixels); + frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base); + outputframe++; + return true; +} +bool libav::exporter::encodeFrame(uint16_t *samples){ + audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den; + write_audio_frame(oc, audio_st, samples); + return true; +} +void libav::exporter::finishRecord(){ + + av_write_trailer(oc); + // Close each codec. // + if (video_st) + close_video(oc, video_st); + if (audio_st) + close_audio(oc, audio_st); + + if (!(fmt->flags & AVFMT_NOFILE)) + // Close the output file. 
// + avio_close(oc->pb); + + // free the stream // + avformat_free_context(oc); +} + +AVStream* libav::exporter::add_stream(AVFormatContext *oc, AVCodec **codec,enum AVCodecID codec_id) + { + AVCodecContext *c; + AVStream *st; + + // find the encoder // + *codec = avcodec_find_encoder(codec_id); + if (!(*codec)) { + //fprintf(stderr, "Could not find encoder for '%s'\n", + // avcodec_get_name(codec_id)); + exit(1); + } + + st = avformat_new_stream(oc, *codec); + if (!st) { + //fprintf(stderr, "Could not allocate stream\n"); + exit(1); + } + st->id = oc->nb_streams-1; + c = st->codec; + + switch ((*codec)->type) { + case AVMEDIA_TYPE_AUDIO: + st->id = 1; + c->sample_fmt = AV_SAMPLE_FMT_S16; + c->bit_rate = 64000; + c->sample_rate = 44100; + c->channels = 2; + c->channel_layout=AV_CH_LAYOUT_STEREO; + break; + + case AVMEDIA_TYPE_VIDEO: + c->codec_id = codec_id; + + c->bit_rate = 400000; + // Resolution must be a multiple of two. // + c->width = 352; + c->height = 288; + // timebase: This is the fundamental unit of time (in seconds) in terms + // * of which frame timestamps are represented. For fixed-fps content, + // * timebase should be 1/framerate and timestamp increments should be + // * identical to 1. // + c->time_base.den = frameRate; + c->time_base.num = 1; + c->gop_size = 12; // emit one intra frame every twelve frames at most // + c->pix_fmt = AV_PIX_FMT_YUV420P; //ADDED HARDCODED TJR 280513 + if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) { + // just for testing, we also add B frames // + c->max_b_frames = 2; + } + if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) { + // Needed to avoid using macroblocks in which some coeffs overflow. + // * This does not happen with normal video, it just happens here as + // * the motion of the chroma plane does not match the luma plane. // + c->mb_decision = 2; + } + break; + default: + break; + } + + // Some formats want stream headers to be separate. // + if (oc->oformat->flags & AVFMT_GLOBALHEADER) + c->flags |= CODEC_FLAG_GLOBAL_HEADER; + + return st; + } +void libav::exporter::open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st) + { + int ret; + AVCodecContext *c = st->codec; + + // open the codec // + ret = avcodec_open2(c, codec, NULL); + if (ret < 0) { + //fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret)); + exit(1); + } + + // allocate and init a re-usable frame // + frame = avcodec_alloc_frame(); + if (!frame) { + //fprintf(stderr, "Could not allocate video frame\n"); + exit(1); + } + + // Allocate the encoded raw picture. // + ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height); + if (ret < 0) { + //fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret)); + exit(1); + } + + // If the output format is not YUV420P, then a temporary YUV420P + // * picture is needed too. It is then converted to the required + // * output format. 
// + if (c->pix_fmt != AV_PIX_FMT_YUV420P) { + ret = avpicture_alloc(&src_picture, AV_PIX_FMT_RGB24, c->width, c->height); + if (ret < 0) { + //fprintf(stderr, "Could not allocate temporary picture: %s\n", + // av_err2str(ret)); + exit(1); + } + } + + // copy data and linesize picture pointers to frame // + *((AVPicture *)frame) = dst_picture; + + outPixels = (uint8_t*)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, st->codec->width,st->codec->height)); + } + + int libav::exporter::open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st) + { + AVCodecContext *c; + int ret; + + c = st->codec; + + // open it // + ret = avcodec_open2(c, codec, NULL); + if (ret < 0) { + //fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret)); + exit(1); + } + + // init signal generator // + t = 0; + tincr = 2 * M_PI * 110.0 / c->sample_rate; + // increment frequency by 110 Hz per second // + tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate; + + if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) + audio_input_frame_size = 10000; + else + audio_input_frame_size = c->frame_size; + + /* + samples = av_malloc(audio_input_frame_size * + av_get_bytes_per_sample(c->sample_fmt) * + c->channels); + if (!samples) { + //fprintf(stderr, "Could not allocate audio samples buffer\n"); + exit(1); + } + */ + return audio_input_frame_size; + } + + void libav::exporter::write_audio_frame(AVFormatContext *oc, AVStream *st,uint16_t *samples) + { + AVCodecContext *c; + AVPacket pkt = { 0 }; // data and size must be 0; + AVFrame *frame = avcodec_alloc_frame(); + int got_packet, ret; + + av_init_packet(&pkt); + c = st->codec; + + //get_audio_frame(samples, audio_input_frame_size, c->channels); + frame->nb_samples = audio_input_frame_size; + uint8_t *sampleptr; + int bufsize=audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt) *c->channels; + if (samples) { + sampleptr=(uint8_t*)samples; + } + else { + sampleptr=new uint8_t[bufsize]; + memset(sampleptr,0,bufsize); + } + + avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, + sampleptr, + audio_input_frame_size * + av_get_bytes_per_sample(c->sample_fmt) * + c->channels, 0); //; + //frame->sample_rate=44100; //hard coded input rate- nope, this doesn't help + //frame->format=AV_SAMPLE_FMT_S16P; + //?? why is ffmpeg reporting fltp as the sample format??? doesn't seem to have an effect to change this though + ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet); + if (!samples) { + delete[] sampleptr; + } + if (ret < 0) { + //fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret)); + exit(1); + } + + if (!got_packet) + return; + + pkt.stream_index = st->index; + + // Write the compressed frame to the media file. 
// + ret = av_interleaved_write_frame(oc, &pkt); + if (ret != 0) { + //fprintf(stderr, "Error while writing audio frame: %s\n", + // av_err2str(ret)); + exit(1); + } + avcodec_free_frame(&frame); + } + + void libav::exporter::write_audio_frame(AVFormatContext *oc, AVStream *st,AVPacket *pkt) + { + /* + AVCodecContext *c; + AVPacket pkt = { 0 }; // data and size must be 0; + AVFrame *frame = avcodec_alloc_frame(); + int got_packet, ret; + + av_init_packet(&pkt); + c = st->codec; + + //get_audio_frame(samples, audio_input_frame_size, c->channels); + frame->nb_samples = audio_input_frame_size; + uint8_t *sampleptr; + int bufsize=audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt) *c->channels; + if (samples) { + sampleptr=(uint8_t*)samples; + } + else { + sampleptr=new uint8_t[bufsize]; + memset(sampleptr,0,bufsize); + } + avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, + sampleptr, + audio_input_frame_size * + av_get_bytes_per_sample(c->sample_fmt) * + c->channels, 1); + + ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet); + if (!samples) { + free(sampleptr); + } + if (ret < 0) { + //fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret)); + exit(1); + } + + if (!got_packet) + return; + */ + + pkt->stream_index = st->index; + + // Write the compressed frame to the media file. // + int ret = av_interleaved_write_frame(oc, pkt); + if (ret != 0) { + //fprintf(stderr, "Error while writing audio frame: %s\n", + // av_err2str(ret)); + exit(1); + } + //avcodec_free_frame(&frame); + av_free_packet(pkt); + } + + void libav::exporter::close_audio(AVFormatContext *oc, AVStream *st) + { + avcodec_close(st->codec); + + + } + + void libav::exporter::write_video_frame(AVFormatContext *oc, AVStream *st, uint8_t *pixels) + { + int ret; + static struct SwsContext *sws_ctx; + AVCodecContext *c = st->codec; + +/* + if (frame_count >= STREAM_NB_FRAMES) { + // No more frames to compress. The codec has a latency of a few + // * frames if using B-frames, so we get the last frames by + // * passing the same picture again. // + } else { + if (c->pix_fmt != AV_PIX_FMT_YUV420P) { + // as we only generate a YUV420P picture, we must convert it + // * to the codec pixel format if needed // + if (!sws_ctx) { + sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P, + c->width, c->height, c->pix_fmt, + sws_flags, NULL, NULL, NULL); + if (!sws_ctx) { + //fprintf(stderr, + // "Could not initialize the conversion context\n"); + exit(1); + } + } + fill_yuv_image(&src_picture, frame_count, c->width, c->height); + sws_scale(sws_ctx, + (const uint8_t * const *)src_picture.data, src_picture.linesize, + 0, c->height, dst_picture.data, dst_picture.linesize); + } else { + fill_yuv_image(&dst_picture, frame_count, c->width, c->height); + } + } +*/ + //always convert RGB to YUV + //should be context allocated once per render instead of per frame?? 
+ // + // + sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24, + c->width, c->height, AV_PIX_FMT_YUV420P, + sws_flags, NULL, NULL, NULL); + + avpicture_fill(&src_picture, pixels, PIX_FMT_RGB24, c->width,c->height); + //avpicture_fill(&dst_picture, outPixels, PIX_FMT_YUV420P, c->width,c->height); + + sws_scale(sws_ctx, src_picture.data, src_picture.linesize, 0, c->height, dst_picture.data, dst_picture.linesize); + //fill_yuv_image(&dst_picture, frame_count, c->width, c->height); + if (oc->oformat->flags & AVFMT_RAWPICTURE) { + // Raw video case - directly store the picture in the packet // + AVPacket pkt; + av_init_packet(&pkt); + + pkt.flags |= AV_PKT_FLAG_KEY; + pkt.stream_index = st->index; + pkt.data = dst_picture.data[0]; + pkt.size = sizeof(AVPicture); + + ret = av_interleaved_write_frame(oc, &pkt); + } else { + AVPacket pkt = { 0 }; + int got_packet; + av_init_packet(&pkt); + + // encode the image // + ret = avcodec_encode_video2(c, &pkt, frame, &got_packet); + if (ret < 0) { + //fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret)); + exit(1); + } + // If size is zero, it means the image was buffered. // + + if (!ret && got_packet && pkt.size) { + pkt.stream_index = st->index; + + // Write the compressed frame to the media file. // + ret = av_interleaved_write_frame(oc, &pkt); + } else { + ret = 0; + } + } + + // + // added 22 may in memory leak run + // + sws_freeContext(sws_ctx); //should be done once per render instead of per frame?? + + if (ret != 0) { + //fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret)); + exit(1); + } + frame_count++; + + //avcodec_free_frame(&frame); + } + + void libav::exporter::close_video(AVFormatContext *oc, AVStream *st) + { + avcodec_close(st->codec); + av_free(src_picture.data[0]); + av_free(dst_picture.data[0]); + av_free(frame); + av_free(outPixels); //SIGSEV here??? + } diff --git a/rotord/libavwrapper.h b/rotord/libavwrapper.h index c06a94d..a03cbbe 100755 --- a/rotord/libavwrapper.h +++ b/rotord/libavwrapper.h @@ -47,6 +47,13 @@ extern "C" { namespace libav { + static bool b_is_one_time_inited=false; + // Some libavcodec calls are not reentrant + //static QMutex mutex; + void maybeInitFFMpegLib(); + + static int sws_flags = SWS_BICUBIC; + // Translated to C++ by Christopher Bruns May 2012 // from ffmeg_adapt.c in whisk package by Nathan Clack, Mark Bolstadt, Michael Meeuwisse class decoder @@ -59,10 +66,7 @@ namespace libav { BLUE = 2, ALPHA = 3 }; - - // Some libavcodec calls are not reentrant - //static QMutex mutex; - static void maybeInitFFMpegLib(); + decoder(PixelFormat pixelFormat=PIX_FMT_RGB24); //decoder(QUrl url, PixelFormat pixelFormat=PIX_FMT_RGB24); @@ -92,7 +96,7 @@ namespace libav { bool open(char* fileName, enum PixelFormat formatParam = PIX_FMT_RGB24); protected: - static bool b_is_one_time_inited; + void initialize(); @@ -152,6 +156,65 @@ namespace libav { float audiostep; }; + + class exporter { + public: + virtual ~exporter(); + bool setup(int w,int h, int bitRate, int frameRate, std::string container); + bool record(std::string filename); + bool encodeFrame(unsigned char *pixels, uint16_t *samples); + bool encodeFrame(unsigned char *pixels,AVPacket *audiopkt); //is possible to just copy the packets? 
+ bool encodeFrame(unsigned char *pixels); + bool encodeFrame(uint16_t *samples); + void finishRecord(); + int get_audio_framesize(){return audioframesize;}; + float get_audio_step(){return audiostep;}; + + AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,enum AVCodecID codec_id); + void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st); + int open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st); + + void write_audio_frame(AVFormatContext *oc, AVStream *st,uint16_t *samples); + void write_audio_frame(AVFormatContext *oc, AVStream *st,AVPacket *pkt); + void close_audio(AVFormatContext *oc, AVStream *st); + + void write_video_frame(AVFormatContext *oc, AVStream *st, uint8_t *pixels); + void close_video(AVFormatContext *oc, AVStream *st); + + private: + AVOutputFormat *fmt; + AVFormatContext *oc; + AVStream *audio_st, *video_st; + AVCodec *audio_codec, *video_codec; + double audio_pts, video_pts; + + int audioframesize; + float audiostep; + int w; + int h; + int bitRate; + int frameRate; + std::string container; + + int outputframe; + + // video output // + + AVFrame *frame; + AVPicture src_picture, dst_picture; + int frame_count; + uint8_t *outPixels; + + + //************************************************************// + // audio output // + + float t, tincr, tincr2; + int audio_input_frame_size; + + + }; + } diff --git a/rotord/rotor.h b/rotord/rotor.h index 4f94151..e9b272c 100755 --- a/rotord/rotor.h +++ b/rotord/rotor.h @@ -62,7 +62,7 @@ extern "C" { #include "vampHost.h" #include "xmlIO.h" #include "libavaudioloader.h" -#include "libavexporter.h" +//#include "libavexporter.h" #include "utils.h" //fequal #include "libavwrapper.h" |
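As a rough guide to the interface introduced by this commit, a minimal video-only usage sketch of `libav::exporter` might look like the following. The resolution, bit rate, frame rate, output filename and all-black RGB frames are illustrative assumptions, not values taken from rotord, and the audio overloads of `encodeFrame()` are omitted.

```cpp
// Hypothetical usage sketch of the exporter class added in this commit.
// Width/height, bit rate, frame rate, filename and the black RGB buffer
// are assumptions for illustration only.
#include <cstdint>
#include <vector>
#include "libavwrapper.h"

int main()
{
    const int width = 640, height = 480;   // both must be multiples of 2 for YUV420P
    const int bitRate = 400000;
    const int frameRate = 25;

    libav::exporter exp;
    if (!exp.setup(width, height, bitRate, frameRate, "mpeg"))
        return 1;

    // record() deduces the container from the file extension,
    // falling back to MPEG when it cannot.
    if (!exp.record("out.mpg"))
        return 1;

    // Feed packed RGB24 frames through the video-only overload.
    std::vector<unsigned char> rgb(width * height * 3, 0);  // one black frame
    for (int i = 0; i < frameRate * 5; ++i)                  // roughly 5 seconds of video
        exp.encodeFrame(rgb.data());

    exp.finishRecord();  // write trailer, close codecs and output file
    return 0;
}
```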
